Unable to access services running on HashiCorp Nomad via browser

I started two Nomad jobs, one for PostgreSQL and another one for pgAdmin, on a Nomad dev agent on my macOS machine. The jobspecs are these:
## postgres.nomad
job "postgres" {
  datacenters = ["dc1"]
  type        = "service"

  group "postgres" {
    count = 1

    task "postgres" {
      driver = "docker"

      config {
        image        = "postgres"
        network_mode = "host"
        port_map {
          db = 5432
        }
      }

      env {
        POSTGRES_USER     = "postgres"
        POSTGRES_PASSWORD = "postgres"
      }

      logs {
        max_files     = 5
        max_file_size = 15
      }

      resources {
        cpu    = 1000
        memory = 1024
        network {
          mbits = 10
          port "db" {
            static = 5432
          }
        }
      }

      service {
        name = "postgres"
        tags = ["postgres for vault"]
        port = "db"
        check {
          name     = "alive"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }
    }

    restart {
      attempts = 10
      interval = "5m"
      delay    = "25s"
      mode     = "delay"
    }
  }

  update {
    max_parallel     = 1
    min_healthy_time = "5s"
    healthy_deadline = "5m"
    auto_revert      = false
    canary           = 0
  }
}
## pgadmin.nomad
job "pgadmin4" {
  datacenters = ["dc1"]
  type        = "service"

  group "pgadmin4" {
    count = 1

    task "pgadmin4" {
      driver = "docker"

      config {
        image        = "dpage/pgadmin4"
        network_mode = "host"
        port_map {
          db = 8080
        }
        volumes = [
          "local/servers.json:/servers.json",
          "local/servers.passfile:/root/.pgpass"
        ]
      }

      template {
        perms       = "600"
        change_mode = "noop"
        destination = "local/servers.passfile"
        data        = <<EOH
postgres.service.consul:5432:postgres:postgres:postgres
EOH
      }

      template {
        change_mode = "noop"
        destination = "local/servers.json"
        data        = <<EOH
{
  "Servers": {
    "1": {
      "Name": "Local Server",
      "Group": "Server Group 1",
      "Port": "5432",
      "Username": "root",
      "PassFile": "/root/.pgpass",
      "Host": "postgres.service.consul",
      "SSLMode": "disable",
      "MaintenanceDB": "postgres"
    }
  }
}
EOH
      }

      env {
        PGADMIN_DEFAULT_EMAIL                     = "youremail#yourdomain.com"
        PGADMIN_DEFAULT_PASSWORD                  = "yoursecurepassword"
        PGADMIN_LISTEN_PORT                       = "5050"
        PGADMIN_CONFIG_ENHANCED_COOKIE_PROTECTION = "False"
        PGADMIN_SERVER_JSON_FILE                  = "/servers.json"
      }

      logs {
        max_files     = 5
        max_file_size = 15
      }

      resources {
        cpu    = 1000
        memory = 1024
        network {
          mbits = 10
          port "ui" {
            static = 5050
          }
        }
      }

      service {
        name = "pgadmin"
        tags = ["urlprefix-/pgadmin strip=/pgadmin"]
        port = "ui"
        check {
          name     = "alive"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }
    }

    restart {
      attempts = 10
      interval = "5m"
      delay    = "25s"
      mode     = "delay"
    }
  }

  update {
    max_parallel     = 1
    min_healthy_time = "5s"
    healthy_deadline = "3m"
    auto_revert      = false
    canary           = 0
  }
}
Both jobs deploy successfully and show a status of running.
As the jobspec shows, pgAdmin should be listening on localhost:5050, but whenever I try to reach that address in the browser I get a "Can't connect to the server" error. Is there any configuration missing?

You exposed the wrong port name: the pgAdmin task's port_map uses the label db and maps 8080, while the declared network port label is ui and the container actually listens on 5050 (PGADMIN_LISTEN_PORT).
Also, in current Nomad versions the network block is only valid at the group level (job -> group -> network); see the Nomad network block documentation.
Please avoid network_mode = "host" if you just want to expose a single port.
Here is a working config:
job "postgres" {
datacenters = ["dc1"]
type = "service"
group "postgres" {
count = 1
task "postgres" {
driver = "docker"
config {
image = "postgres"
ports = ["db"]
}
env {
POSTGRES_USER="postgres"
POSTGRES_PASSWORD="postgres"
}
logs {
max_files = 5
max_file_size = 15
}
resources {
cpu = 1000
memory = 1024
}
service {
name = "postgres"
tags = ["postgres for vault"]
port = "db"
check {
name = "alive"
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
}
restart {
attempts = 10
interval = "5m"
delay = "25s"
mode = "delay"
}
network {
mbits = 10
port "db" {
static = 5432
}
}
}
update {
max_parallel = 1
min_healthy_time = "5s"
healthy_deadline = "5m"
auto_revert = false
canary = 0
}
}
job "pgadmin4" {
datacenters = ["dc1"]
type = "service"
group "pgadmin4" {
count = 1
task "pgadmin4" {
driver = "docker"
config {
image = "dpage/pgadmin4"
ports = ["ui"]
volumes = [
"local/servers.json:/servers.json",
"local/servers.passfile:/root/.pgpass"
]
}
template {
perms = "600"
change_mode = "noop"
destination = "local/servers.passfile"
data = <<EOH
postgres.service.consul:5432:postgres:postgres:postgres
EOH
}
template {
change_mode = "noop"
destination = "local/servers.json"
data = <<EOH
{
"Servers": {
"1": {
"Name": "Local Server",
"Group": "Server Group 1",
"Port": "5432",
"Username": "root",
"PassFile": "/root/.pgpass",
"Host": "postgres.service.consul",
"SSLMode": "disable",
"MaintenanceDB": "postgres"
}
}
}
EOH
}
env {
PGADMIN_DEFAULT_EMAIL="youremail#yourdomain.com"
PGADMIN_DEFAULT_PASSWORD="yoursecurepassword"
PGADMIN_LISTEN_PORT="5050"
PGADMIN_CONFIG_ENHANCED_COOKIE_PROTECTION="False"
PGADMIN_SERVER_JSON_FILE="/servers.json"
}
logs {
max_files = 5
max_file_size = 15
}
resources {
cpu = 1000
memory = 1024
}
service {
name = "pgadmin"
tags = [ "urlprefix-/pgadmin strip=/pgadmin"]
port = "ui"
check {
name = "alive"
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
}
restart {
attempts = 10
interval = "5m"
delay = "25s"
mode = "delay"
}
network {
mbits = 10
port "ui" {
static = 5050
}
}
}
update {
max_parallel = 1
min_healthy_time = "5s"
healthy_deadline = "3m"
auto_revert = false
canary = 0
}
}
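A side note on port mapping: in Nomad 0.12 and later, the task-level port_map from the original jobspec is deprecated; the container-side port is declared instead with an optional to attribute on the port inside the group-level network block. A minimal sketch of that form, assuming pgAdmin keeps listening on 5050:

group "pgadmin4" {
  network {
    port "ui" {
      static = 5050  # host port
      to     = 5050  # container port, must match PGADMIN_LISTEN_PORT
    }
  }
}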

Related

Neovim DAP not skipping node internals when debugging async method

I'm a newcomer to Neovim and am currently having trouble getting DAP to work, more specifically getting skipFiles to work. I've tried many different patterns for skipFiles and found no success.
Every time I put a breakpoint in a Jest or Mocha test and it runs into an async method, stepping takes me to a promiseAfterHook function, then to promiseBeforeHook, then to promiseRejectHandler, then to promiseAfterHook, then to processTicksAndRejections, then to emitHook... I believe these are methods in node_internals that should be ignored.
But in the test below it doesn't jump into node internals for console.log and steps over it just fine, so this may just be an issue with how the debugger handles async/await?
it('testing debugger', async () => {
  console.log('hello');  // breakpoint set here
  console.log('hi');
  const x = 5 + 50;
  const y = 6;
  console.log(x + y);
  const somePromise = await thisIsTheProblemLine();  // stepping into this await drops into node internals
Any help would be appreciated. Thank you.
Here are my dap configs:
local home = os.getenv("HOME")
local dap = require("dap")

dap.adapters.node2 = {
  type = "executable",
  command = "node",
  args = { home .. "/personal/microsoft-sucks/vscode-node-debug2/out/src/nodeDebug.js" },
}

dap.configurations.javascript = {
  {
    name = "Launch",
    type = "node2",
    request = "launch",
    program = "${file}",
    cwd = vim.loop.cwd(),
    sourceMaps = true,
    protocol = "inspector",
    console = "integratedTerminal",
  },
  {
    -- For this to work you need to make sure the node process
    -- is started with the `--inspect` flag.
    name = "Attach to process",
    type = "node2",
    request = "attach",
    processId = require("dap.utils").pick_process,
  },
  {
    name = "Debug Mocha Tests",
    type = "node2",
    request = "launch",
    cwd = vim.loop.cwd(),
    runtimeArgs = {
      "--inspect-brk",
      "${workspaceFolder}/node_modules/mocha/bin/_mocha",
    },
    sourceMaps = true,
    protocol = "inspector",
    runtimeExecutable = "node",
    args = { "inspect", "${file}" },
    port = 9229,
    console = "integratedTerminal",
    skipFiles = {
      "<node_internals>/**",
      "node_modules/**",
    },
  },
}

dap.configurations.typescript = {
  {
    name = "ts-node (Node2 with ts-node)",
    type = "node2",
    request = "launch",
    cwd = vim.loop.cwd(),
    runtimeArgs = { "-r", "ts-node/register" },
    runtimeExecutable = "node",
    args = { "--inspect", "${file}" },
    sourceMaps = true,
    skipFiles = { "<node_internals>/**", "node_modules/**" },
  },
  {
    name = "Jest (Node2 with ts-node)",
    type = "node2",
    request = "launch",
    cwd = vim.loop.cwd(),
    runtimeArgs = { "--inspect-brk", "${workspaceFolder}/node_modules/.bin/jest" },
    runtimeExecutable = "node",
    args = { "${file}", "--runInBand", "--coverage", "false" },
    sourceMaps = true,
    port = 9229,
    skipFiles = {
      "<node_internals>/**",
      "node_modules/**",
    },
  },
}
https://pastebin.pl/view/81961f9a

Trying to expose aws_api_gatewayv2 to domain name, getting no route to host

I have created an API Gateway v2 that exposes a single AWS Lambda, through which I intend to expose my entire REST API. So far I have this working just fine by following the HashiCorp tutorial.
// S3 bucket for code release and updates.
resource "aws_s3_bucket" "lambda_zips" {
  bucket        = "tdweb-lambda-zips"
  acl           = "private"
  force_destroy = true
}

data "archive_file" "web_api_lambda" {
  type        = "zip"
  source_dir  = "${path.module}/web-api"
  output_path = "${path.module}/web-api.zip"
}

resource "aws_s3_object" "web_api" {
  bucket = aws_s3_bucket.lambda_zips.id
  key    = "web-api.zip"
  source = data.archive_file.web_api_lambda.output_path
  etag   = filemd5(data.archive_file.web_api_lambda.output_path)
}

// Lambda and associated IAM roles and permissions.
resource "aws_lambda_function" "web_api" {
  function_name    = "web_api_lambda"
  s3_bucket        = aws_s3_bucket.lambda_zips.id
  s3_key           = aws_s3_object.web_api.key
  runtime          = "python3.9"
  handler          = "lambda_function.lambda_handler"
  source_code_hash = data.archive_file.web_api_lambda.output_base64sha256
  role             = aws_iam_role.web_api_lambda.arn
}

resource "aws_cloudwatch_log_group" "web_api_lambda" {
  name              = "/aws/lambda/${aws_lambda_function.web_api.function_name}"
  retention_in_days = 30
}

data "aws_iam_policy_document" "web_api_assume_role" {
  statement {
    effect  = "Allow"
    actions = ["sts:AssumeRole"]
    principals {
      type        = "Service"
      identifiers = ["lambda.amazonaws.com"]
    }
  }
}

resource "aws_iam_role" "web_api_lambda" {
  name               = "web_api_lambda_role"
  assume_role_policy = data.aws_iam_policy_document.web_api_assume_role.json
}

data "aws_iam_policy_document" "web_api_lambda" {
  statement {
    effect = "Allow"
    actions = [
      "logs:CreateLogStream",
      "logs:CreateLogDelivery",
      "logs:PutLogEvents"
    ]
    resources = ["arn:aws:logs:*:*:*"]
  }
  statement {
    effect = "Allow"
    actions = [
      "dynamodb:*"
    ]
    resources = ["*"]
  }
  statement {
    effect = "Allow"
    actions = [
      "s3:*",
    ]
    resources = ["*"]
  }
}

resource "aws_iam_role_policy" "web_api_lambda" {
  name   = "web_api_lambda_policy"
  policy = data.aws_iam_policy_document.web_api_lambda.json
  role   = aws_iam_role.web_api_lambda.id
}

// API Gateway
resource "aws_apigatewayv2_api" "web_api" {
  name          = "web_api_gateway"
  protocol_type = "HTTP"
}

resource "aws_apigatewayv2_stage" "web_api_prod" {
  api_id      = aws_apigatewayv2_api.web_api.id
  name        = "prod"
  auto_deploy = true

  access_log_settings {
    destination_arn = aws_cloudwatch_log_group.web_api_gateway.arn
    format = jsonencode({
      requestId               = "$context.requestId"
      sourceIp                = "$context.identity.sourceIp"
      requestTime             = "$context.requestTime"
      protocol                = "$context.protocol"
      httpMethod              = "$context.httpMethod"
      resourcePath            = "$context.resourcePath"
      routeKey                = "$context.routeKey"
      status                  = "$context.status"
      responseLength          = "$context.responseLength"
      integrationErrorMessage = "$context.integrationErrorMessage"
    })
  }
}

resource "aws_apigatewayv2_integration" "web_api" {
  api_id             = aws_apigatewayv2_api.web_api.id
  integration_uri    = aws_lambda_function.web_api.invoke_arn
  integration_type   = "AWS_PROXY"
  integration_method = "POST"
}

resource "aws_apigatewayv2_route" "web_api_prod" {
  api_id    = aws_apigatewayv2_api.web_api.id
  route_key = "GET /"
  target    = "integrations/${aws_apigatewayv2_integration.web_api.id}"
}

resource "aws_cloudwatch_log_group" "web_api_gateway" {
  name              = "/aws/api_gw/${aws_apigatewayv2_api.web_api.name}"
  retention_in_days = 30
}

resource "aws_lambda_permission" "web_api_gateway" {
  statement_id  = "AllowExecutionFromAPIGateway"
  action        = "lambda:InvokeFunction"
  function_name = aws_lambda_function.web_api.function_name
  principal     = "apigateway.amazonaws.com"
  source_arn    = "${aws_apigatewayv2_api.web_api.execution_arn}/*/*"
}

output "base_url" {
  value = aws_apigatewayv2_stage.web_api_prod.invoke_url
}
Now I can do a simple curl of the output base_url and see the response from my Lambda.
I would like this API to be exposed via my domain name, so I've added this:
resource "aws_apigatewayv2_domain_name" "web_api" {
domain_name = "prod.${var.domain_name}"
domain_name_configuration {
certificate_arn = aws_acm_certificate.main.arn
endpoint_type = "REGIONAL"
security_policy = "TLS_1_2"
}
}
resource "aws_apigatewayv2_api_mapping" "web_api" {
api_id = aws_apigatewayv2_api.web_api.id
domain_name = aws_apigatewayv2_domain_name.web_api.id
stage = aws_apigatewayv2_stage.web_api_prod.id
}
The certificate is already in use by an existing web server running on EC2 and works there. However, when I try to curl prod.mydomainname.com I get:
curl: (6) Could not resolve host: prod.mydomainname.com
I am not sure why it's not exposed.
I missed the Route 53 entry. The aws_apigatewayv2_domain_name resource only provisions an API Gateway endpoint; nothing publishes it in DNS, so an alias record pointing at that endpoint is needed:
resource "aws_route53_record" "web_api" {
zone_id = aws_route53_zone.external.zone_id
name = aws_apigatewayv2_domain_name.web_api.domain_name
type = "A"
alias {
name = aws_apigatewayv2_domain_name.web_api.domain_name_configuration[0].target_domain_name
zone_id = aws_apigatewayv2_domain_name.web_api.domain_name_configuration[0].hosted_zone_id
evaluate_target_health = false
}
}
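With the alias record in place, the hostname resolves to the regional endpoint that API Gateway provisions for the custom domain. As an optional convenience (the output name here is hypothetical), the final URL can be surfaced alongside base_url:

output "custom_domain_url" {
  value = "https://${aws_apigatewayv2_domain_name.web_api.domain_name}"
}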

Springboot lambda on localstack with terraform, docker: Error loading class StreamLambdaHandler: Metaspace","errorType":"java.lang.OutOfMemoryError

I have created a REST API Lambda using Spring Boot. When I create a jar file from it and deploy it to LocalStack with Terraform, I can call the API and it works as expected.
But when I instead create a Docker image from my code and adapt the Terraform to use that image_uri, I get the following error when I call the API:
Lambda runtime initialization error for function arn:aws:lambda:eu-west-2:000000000000:function:restapi: b'{"errorMessage":"Error loading class com.example.lambda.StreamLambdaHandler: Metaspace","errorType":"java.lang.OutOfMemoryError"}'
And this is the Terraform:
variable "STAGE" {
type = string
default = "local"
}
variable "AWS_REGION" {
type = string
default = "eu-west-2"
}
variable "IMG_URI" {
type = string
default = "localhost:4510/com.example-restapi-lambda:1.0.0"
}
variable "FUNCTION_NAME" {
type = string
default = "restapi"
}
variable "FUNCTION_HANDLER" {
type = string
default = "com.example.lambda.StreamLambdaHandler"
}
provider "aws" {
access_key = "test_access_key"
secret_key = "test_secret_key"
region = var.AWS_REGION
s3_force_path_style = true
skip_credentials_validation = true
skip_metadata_api_check = true
skip_requesting_account_id = true
endpoints {
apigateway = var.STAGE == "local" ? "http://localhost:4566" : null
cloudformation = var.STAGE == "local" ? "http://localhost:4566" : null
cloudwatch = var.STAGE == "local" ? "http://localhost:4566" : null
cloudwatchevents = var.STAGE == "local" ? "http://localhost:4566" : null
iam = var.STAGE == "local" ? "http://localhost:4566" : null
lambda = var.STAGE == "local" ? "http://localhost:4566" : null
s3 = var.STAGE == "local" ? "http://localhost:4566" : null
}
}
resource "aws_iam_role" "lambda-execution-role" {
name = "lambda-execution-role-${var.FUNCTION_NAME}"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_lambda_function" "restApiLambdaFunction" {
image_uri = var.IMG_URI
function_name = var.FUNCTION_NAME
role = aws_iam_role.lambda-execution-role.arn
handler = var.FUNCTION_HANDLER
# handler = "org.springframework.cloud.function.adapter.aws.FunctionInvoker"
runtime = "java11"
timeout = 60
environment {
variables = {
MAIN_CLASS = "com.example.lambda.AWSLambdaApp"
# JAVA_OPTS = "-Xmx5g"
}
}
}
resource "aws_api_gateway_rest_api" "rest-api" {
name = "RestApi-${var.FUNCTION_NAME}"
}
resource "aws_api_gateway_resource" "proxy" {
rest_api_id = aws_api_gateway_rest_api.rest-api.id
parent_id = aws_api_gateway_rest_api.rest-api.root_resource_id
path_part = "{proxy+}"
}
resource "aws_api_gateway_method" "proxy" {
rest_api_id = aws_api_gateway_rest_api.rest-api.id
resource_id = aws_api_gateway_resource.proxy.id
http_method = "ANY"
authorization = "NONE"
}
resource "aws_api_gateway_integration" "proxy" {
rest_api_id = aws_api_gateway_rest_api.rest-api.id
resource_id = aws_api_gateway_method.proxy.resource_id
http_method = aws_api_gateway_method.proxy.http_method
integration_http_method = "POST"
type = "AWS_PROXY"
uri = aws_lambda_function.restApiLambdaFunction.invoke_arn
}
resource "aws_api_gateway_deployment" "rest-api-deployment" {
depends_on = [aws_api_gateway_integration.proxy]
rest_api_id = aws_api_gateway_rest_api.rest-api.id
stage_name = var.STAGE
}
resource "aws_cloudwatch_event_rule" "warmup" {
name = "warmup-event-rule-${var.FUNCTION_NAME}"
schedule_expression = "rate(10 minutes)"
}
resource "aws_cloudwatch_event_target" "warmup" {
target_id = "warmup"
rule = aws_cloudwatch_event_rule.warmup.name
arn = aws_lambda_function.restApiLambdaFunction.arn
input = "{\"httpMethod\": \"SCHEDULE\", \"path\": \"warmup\"}"
}
resource "aws_lambda_permission" "warmup-permission" {
statement_id = "AllowExecutionFromCloudWatch"
action = "lambda:InvokeFunction"
function_name = aws_lambda_function.restApiLambdaFunction.function_name
principal = "events.amazonaws.com"
source_arn = aws_cloudwatch_event_rule.warmup.arn
}
The closest thing I have seen to a solution is passing JAVA_OPTS to the Docker image to increase the available memory, but I'm not sure how to do that via Terraform, or whether it would even solve the problem.
Any guidance would be greatly appreciated.
As per my comment, there is a memory_size option [1] in the aws_lambda_function resource. If not defined, it defaults to 128 MB, which probably needs to be increased in order to avoid the OOM error.
[1] https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_function#memory_size
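A minimal sketch of the change (1024 is an assumed value; tune it to the workload, since Spring Boot handlers tend to need considerably more than the default):

resource "aws_lambda_function" "restApiLambdaFunction" {
  # ... all other arguments as in the question ...
  memory_size = 1024  # in MB; defaults to 128 when omitted
}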

Return multiple results with index() in Terraform

I currently have the below lists,
local.security_list = [
  {
    ocid = security_list_id_1
    name = subnet1
  },
  {
    ocid = security_list_id_2
    name = subnet1
  },
  {
    ocid = security_list_id_3
    name = subnet2
  },
  {
    name = subnet2
    ocid = security_list_id_4
  },
]

var.vcn_security_lists = [
  {
    security_list_name = security_list_1
    subnet_name        = subnet1
  },
  {
    security_list_name = security_list_2
    subnet_name        = subnet1
  },
  {
    security_list_name = security_list_3
    subnet_name        = subnet2
  }
]
I want to create the security lists using the var.vcn_security_lists list and then assign them to the subnets later on via the subnet_name field. I have done this previously using the index() function, as below, which loops through a subnet and pulls out the security list with the same subnet name. The problem is that index() only returns one item; how can I return multiple matches from a list?
subnet_security_list_ocid = [
  local.security_list[
    index(
      local.security_list.*.name,
      var.vcn_security_lists[
        index(var.vcn_security_lists.*.subnet_name, var.vcn_subnets[count.index].subnet_name)
      ].security_list_name
    )
  ].id
]
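One way to get every match instead of just the first is a Terraform for expression with an if filter. A minimal sketch reusing the names above (note that the elements of local.security_list carry an ocid attribute, which is what this collects):

subnet_security_list_ocids = [
  for sl in local.security_list : sl.ocid
  if sl.name == var.vcn_subnets[count.index].subnet_name
]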

How to configure a Tarantool Cartridge cluster automatically?

I have an application developed on Tarantool Cartridge. My cluster consists of 12 Tarantool instances divided across 4 replica sets (some are routers, some are storages). Right now, when setting this cluster up from scratch, I configure it manually in the Web UI admin application: creating replica sets, bootstrapping storages, and configuring the failover mode.
How can I do this configuration automatically? What is the best way?
There are tools for this:
Ansible role: https://github.com/tarantool/ansible-cartridge
Kubernetes operator: https://github.com/tarantool/tarantool-operator
Alternatively, you can start all Tarantool instances that should be included in the cluster, connect to one of them via tarantoolctl, and execute a script that defines the cluster topology, initializes sharding via vshard, and configures the failover mode (in the example below, via etcd):
cartridge = require('cartridge')

replicasets = { {
  alias = 'router1',
  roles = { 'router', 'vshard-router', 'failover-coordinator' },
  join_servers = { { uri = '172.20.0.2:3301' } }
}, {
  alias = 'router2',
  roles = { 'router', 'vshard-router', 'failover-coordinator' },
  join_servers = { { uri = '172.20.0.3:3301' } }
}, {
  alias = 'router3',
  roles = { 'router', 'vshard-router', 'failover-coordinator' },
  join_servers = { { uri = '172.20.0.4:3301' } }
}, {
  alias = 'storage1',
  roles = { 'storage', 'vshard-storage' },
  join_servers = { { uri = '172.20.0.2:3302' },
                   { uri = '172.20.0.3:3302' },
                   { uri = '172.20.0.4:3302' } }
}, {
  alias = 'storage2',
  roles = { 'storage', 'vshard-storage' },
  join_servers = { { uri = '172.20.0.3:3303' },
                   { uri = '172.20.0.2:3303' },
                   { uri = '172.20.0.4:3303' } }
}, {
  alias = 'storage3',
  roles = { 'storage', 'vshard-storage' },
  join_servers = { { uri = '172.20.0.4:3304' },
                   { uri = '172.20.0.2:3304' },
                   { uri = '172.20.0.3:3304' } }
} }

cartridge.admin_edit_topology({ replicasets = replicasets })
cartridge.admin_bootstrap_vshard()
cartridge.failover_set_params({
  mode = 'stateful',
  state_provider = 'etcd2',
  etcd2_params = {
    prefix = '/',
    lock_delay = 10,
    endpoints = { '172.20.0.2:2379', '172.20.0.3:2379', '172.20.0.4:2379' }
  }
})
