I have an application developed on Tarantool Cartridge. My cluster consists of 12 Tarantool instances divided into 4 replica sets (some are routers, some are storages). When setting this cluster up from scratch, I currently configure it manually in the Web UI admin application: I create replica sets, bootstrap the storages, and configure the failover mode.
How can I do this configuration automatically? What is the best way?
There are ready-made tools for this:
Ansible role: https://github.com/tarantool/ansible-cartridge
Kubernetes operator: https://github.com/tarantool/tarantool-operator
Alternatively, you can start all Tarantool instances that should be included in the cluster, connect to one of them via tarantoolctl, and execute a script that defines the cluster topology, initializes sharding via vshard, and configures the failover mode (in the example below, via etcd).
cartridge = require('cartridge')

-- Topology: three router replica sets and three storage
-- replica sets, each storage replicated across three hosts.
replicasets = { {
    alias = 'router1',
    roles = { 'router', 'vshard-router', 'failover-coordinator' },
    join_servers = { { uri = '172.20.0.2:3301' } }
}, {
    alias = 'router2',
    roles = { 'router', 'vshard-router', 'failover-coordinator' },
    join_servers = { { uri = '172.20.0.3:3301' } }
}, {
    alias = 'router3',
    roles = { 'router', 'vshard-router', 'failover-coordinator' },
    join_servers = { { uri = '172.20.0.4:3301' } }
}, {
    alias = 'storage1',
    roles = { 'storage', 'vshard-storage' },
    join_servers = { { uri = '172.20.0.2:3302' },
        { uri = '172.20.0.3:3302' },
        { uri = '172.20.0.4:3302' } }
}, {
    alias = 'storage2',
    roles = { 'storage', 'vshard-storage' },
    join_servers = { { uri = '172.20.0.3:3303' },
        { uri = '172.20.0.2:3303' },
        { uri = '172.20.0.4:3303' } }
}, {
    alias = 'storage3',
    roles = { 'storage', 'vshard-storage' },
    join_servers = { { uri = '172.20.0.4:3304' },
        { uri = '172.20.0.2:3304' },
        { uri = '172.20.0.3:3304' } }
} }

-- Apply the topology, bootstrap the vshard buckets, and enable
-- stateful failover with etcd as the state provider.
cartridge.admin_edit_topology({ replicasets = replicasets })
cartridge.admin_bootstrap_vshard()
cartridge.failover_set_params({
    mode = 'stateful',
    state_provider = 'etcd2',
    etcd2_params = {
        prefix = '/',
        lock_delay = 10,
        endpoints = { '172.20.0.2:2379', '172.20.0.3:2379', '172.20.0.4:2379' }
    }
})
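To apply this without the Web UI, the script can be piped into the admin console of any one instance; the exact URI, credentials, and file name depend on your setup, e.g. something like tarantoolctl connect 172.20.0.2:3301 < topology.lua.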
I have created an API Gateway v2 that exposes a single AWS Lambda, through which I intend to serve my entire REST API. So far I have this working just fine by following the HashiCorp tutorial.
// S3 bucket for code release and updates.
resource "aws_s3_bucket" "lambda_zips" {
  bucket        = "tdweb-lambda-zips"
  acl           = "private"
  force_destroy = true
}

data "archive_file" "web_api_lambda" {
  type        = "zip"
  source_dir  = "${path.module}/web-api"
  output_path = "${path.module}/web-api.zip"
}

resource "aws_s3_object" "web_api" {
  bucket = aws_s3_bucket.lambda_zips.id
  key    = "web-api.zip"
  source = data.archive_file.web_api_lambda.output_path
  etag   = filemd5(data.archive_file.web_api_lambda.output_path)
}

// Lambda and associated IAM roles and permissions.
resource "aws_lambda_function" "web_api" {
  function_name    = "web_api_lambda"
  s3_bucket        = aws_s3_bucket.lambda_zips.id
  s3_key           = aws_s3_object.web_api.key
  runtime          = "python3.9"
  handler          = "lambda_function.lambda_handler"
  source_code_hash = data.archive_file.web_api_lambda.output_base64sha256
  role             = aws_iam_role.web_api_lambda.arn
}

resource "aws_cloudwatch_log_group" "web_api_lambda" {
  name              = "/aws/lambda/${aws_lambda_function.web_api.function_name}"
  retention_in_days = 30
}

data "aws_iam_policy_document" "web_api_assume_role" {
  statement {
    effect  = "Allow"
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["lambda.amazonaws.com"]
    }
  }
}

resource "aws_iam_role" "web_api_lambda" {
  name               = "web_api_lambda_role"
  assume_role_policy = data.aws_iam_policy_document.web_api_assume_role.json
}

data "aws_iam_policy_document" "web_api_lambda" {
  statement {
    effect = "Allow"
    actions = [
      "logs:CreateLogStream",
      "logs:CreateLogDelivery",
      "logs:PutLogEvents"
    ]
    resources = ["arn:aws:logs:*:*:*"]
  }

  statement {
    effect = "Allow"
    actions = [
      "dynamodb:*"
    ]
    resources = ["*"]
  }

  statement {
    effect = "Allow"
    actions = [
      "s3:*",
    ]
    resources = ["*"]
  }
}

resource "aws_iam_role_policy" "web_api_lambda" {
  name   = "web_api_lambda_policy"
  policy = data.aws_iam_policy_document.web_api_lambda.json
  role   = aws_iam_role.web_api_lambda.id
}

// API Gateway
resource "aws_apigatewayv2_api" "web_api" {
  name          = "web_api_gateway"
  protocol_type = "HTTP"
}

resource "aws_apigatewayv2_stage" "web_api_prod" {
  api_id      = aws_apigatewayv2_api.web_api.id
  name        = "prod"
  auto_deploy = true

  access_log_settings {
    destination_arn = aws_cloudwatch_log_group.web_api_gateway.arn
    format = jsonencode({
      requestId               = "$context.requestId"
      sourceIp                = "$context.identity.sourceIp"
      requestTime             = "$context.requestTime"
      protocol                = "$context.protocol"
      httpMethod              = "$context.httpMethod"
      resourcePath            = "$context.resourcePath"
      routeKey                = "$context.routeKey"
      status                  = "$context.status"
      responseLength          = "$context.responseLength"
      integrationErrorMessage = "$context.integrationErrorMessage"
    })
  }
}

resource "aws_apigatewayv2_integration" "web_api" {
  api_id             = aws_apigatewayv2_api.web_api.id
  integration_uri    = aws_lambda_function.web_api.invoke_arn
  integration_type   = "AWS_PROXY"
  integration_method = "POST"
}

resource "aws_apigatewayv2_route" "web_api_prod" {
  api_id    = aws_apigatewayv2_api.web_api.id
  route_key = "GET /"
  target    = "integrations/${aws_apigatewayv2_integration.web_api.id}"
}

resource "aws_cloudwatch_log_group" "web_api_gateway" {
  name              = "/aws/api_gw/${aws_apigatewayv2_api.web_api.name}"
  retention_in_days = 30
}

resource "aws_lambda_permission" "web_api_gateway" {
  statement_id  = "AllowExecutionFromAPIGateway"
  action        = "lambda:InvokeFunction"
  function_name = aws_lambda_function.web_api.function_name
  principal     = "apigateway.amazonaws.com"
  source_arn    = "${aws_apigatewayv2_api.web_api.execution_arn}/*/*"
}

output "base_url" {
  value = aws_apigatewayv2_stage.web_api_prod.invoke_url
}
Now I can do a simple curl of the output base_url and I see the response from my Lambda.
I would like this API to be exposed via my domain name, so I've added this:
resource "aws_apigatewayv2_domain_name" "web_api" {
domain_name = "prod.${var.domain_name}"
domain_name_configuration {
certificate_arn = aws_acm_certificate.main.arn
endpoint_type = "REGIONAL"
security_policy = "TLS_1_2"
}
}
resource "aws_apigatewayv2_api_mapping" "web_api" {
api_id = aws_apigatewayv2_api.web_api.id
domain_name = aws_apigatewayv2_domain_name.web_api.id
stage = aws_apigatewayv2_stage.web_api_prod.id
}
The certificate is already in use by an existing web server running on EC2 and works there. However, when I try to curl prod.mydomainname.com I get an error of:
curl: (6) Could not resolve host: prod.mydomainname.com
I am not sure why it's not exposed.
It turned out I had missed the Route53 entry. Adding this record fixed it:
resource "aws_route53_record" "web_api" {
zone_id = aws_route53_zone.external.zone_id
name = aws_apigatewayv2_domain_name.web_api.domain_name
type = "A"
alias {
name = aws_apigatewayv2_domain_name.web_api.domain_name_configuration[0].target_domain_name
zone_id = aws_apigatewayv2_domain_name.web_api.domain_name_configuration[0].hosted_zone_id
evaluate_target_health = false
}
}
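With the alias record in place, prod.<domain> resolves to the regional API Gateway endpoint and the curl above returns the Lambda response as expected.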
I currently have the lists below:
local.security_list = [
  {
    ocid = security_list_id_1
    name = subnet1
  },
  {
    ocid = security_list_id_2
    name = subnet1
  },
  {
    ocid = security_list_id_3
    name = subnet2
  },
  {
    ocid = security_list_id_4
    name = subnet2
  },
]

var.vcn_security_lists = [
  {
    security_list_name = security_list_1
    subnet_name        = subnet1
  },
  {
    security_list_name = security_list_2
    subnet_name        = subnet1
  },
  {
    security_list_name = security_list_3
    subnet_name        = subnet2
  }
]
I want to create the security lists using the var.vcn_security_lists list and then assign them to the subnets later on using the subnet_name field. I have done this previously with the index() function as below, which loops through a subnet and pulls out the security list with the same subnet name. The problem is that this only returns one item; how can I return multiple items from a list?
subnet_security_list_ocid = [
  local.security_list[
    index(
      local.security_list.*.name,
      var.vcn_security_lists[
        index(var.vcn_security_lists.*.subnet_name, var.vcn_subnets[count.index].subnet_name)
      ].security_list_name
    )
  ].id
]
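One way to return every match rather than a single one (a sketch, not from the original post, assuming you want all security list OCIDs for the subnet currently being built; names mirror the structures above) is a for expression with an if filter, which yields a list of all matching elements:

# Hypothetical replacement for the index()-based lookup: collect the
# OCID of every security list whose name matches the current subnet.
subnet_security_list_ocid = [
  for sl in local.security_list :
  sl.ocid
  if sl.name == var.vcn_subnets[count.index].subnet_name
]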
I started two Nomad jobs on my macOS machine using Nomad in dev mode: one for PostgreSQL and another one for pgAdmin. The jobspecs are these:
## postgres.nomad
job "postgres" {
  datacenters = ["dc1"]
  type        = "service"

  group "postgres" {
    count = 1

    task "postgres" {
      driver = "docker"

      config {
        image        = "postgres"
        network_mode = "host"
        port_map {
          db = 5432
        }
      }

      env {
        POSTGRES_USER     = "postgres"
        POSTGRES_PASSWORD = "postgres"
      }

      logs {
        max_files     = 5
        max_file_size = 15
      }

      resources {
        cpu    = 1000
        memory = 1024
        network {
          mbits = 10
          port "db" {
            static = 5432
          }
        }
      }

      service {
        name = "postgres"
        tags = ["postgres for vault"]
        port = "db"
        check {
          name     = "alive"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }
    }

    restart {
      attempts = 10
      interval = "5m"
      delay    = "25s"
      mode     = "delay"
    }
  }

  update {
    max_parallel     = 1
    min_healthy_time = "5s"
    healthy_deadline = "5m"
    auto_revert      = false
    canary           = 0
  }
}
## pgadmin.nomad
job "pgadmin4" {
  datacenters = ["dc1"]
  type        = "service"

  group "pgadmin4" {
    count = 1

    task "pgadmin4" {
      driver = "docker"

      config {
        image        = "dpage/pgadmin4"
        network_mode = "host"
        port_map {
          db = 8080
        }
        volumes = [
          "local/servers.json:/servers.json",
          "local/servers.passfile:/root/.pgpass"
        ]
      }

      template {
        perms       = "600"
        change_mode = "noop"
        destination = "local/servers.passfile"
        data        = <<EOH
postgres.service.consul:5432:postgres:postgres:postgres
EOH
      }

      template {
        change_mode = "noop"
        destination = "local/servers.json"
        data        = <<EOH
{
  "Servers": {
    "1": {
      "Name": "Local Server",
      "Group": "Server Group 1",
      "Port": "5432",
      "Username": "root",
      "PassFile": "/root/.pgpass",
      "Host": "postgres.service.consul",
      "SSLMode": "disable",
      "MaintenanceDB": "postgres"
    }
  }
}
EOH
      }

      env {
        PGADMIN_DEFAULT_EMAIL                     = "youremail@yourdomain.com"
        PGADMIN_DEFAULT_PASSWORD                  = "yoursecurepassword"
        PGADMIN_LISTEN_PORT                       = "5050"
        PGADMIN_CONFIG_ENHANCED_COOKIE_PROTECTION = "False"
        PGADMIN_SERVER_JSON_FILE                  = "/servers.json"
      }

      logs {
        max_files     = 5
        max_file_size = 15
      }

      resources {
        cpu    = 1000
        memory = 1024
        network {
          mbits = 10
          port "ui" {
            static = 5050
          }
        }
      }

      service {
        name = "pgadmin"
        tags = ["urlprefix-/pgadmin strip=/pgadmin"]
        port = "ui"
        check {
          name     = "alive"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }
    }

    restart {
      attempts = 10
      interval = "5m"
      delay    = "25s"
      mode     = "delay"
    }
  }

  update {
    max_parallel     = 1
    min_healthy_time = "5s"
    healthy_deadline = "3m"
    auto_revert      = false
    canary           = 0
  }
}
Both jobs deploy successfully and report their status as running.
As seen in the jobspecs, pgAdmin should be listening on localhost:5050, but whenever I try to reach this address in the browser I get a "Can't connect to the server" error. Is there any configuration missing?
You exposed the wrong port name: the pgadmin4 task maps db = 8080, but the service and resources use the port label ui, and pgAdmin actually listens on 5050.
Also note that in current Nomad versions the network stanza belongs at the group level (job -> group -> network); see the Nomad documentation.
Please avoid network_mode = "host" if you just want to expose a single port.
Here's a working config:
job "postgres" {
datacenters = ["dc1"]
type = "service"
group "postgres" {
count = 1
task "postgres" {
driver = "docker"
config {
image = "postgres"
ports = ["db"]
}
env {
POSTGRES_USER="postgres"
POSTGRES_PASSWORD="postgres"
}
logs {
max_files = 5
max_file_size = 15
}
resources {
cpu = 1000
memory = 1024
}
service {
name = "postgres"
tags = ["postgres for vault"]
port = "db"
check {
name = "alive"
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
}
restart {
attempts = 10
interval = "5m"
delay = "25s"
mode = "delay"
}
network {
mbits = 10
port "db" {
static = 5432
}
}
}
update {
max_parallel = 1
min_healthy_time = "5s"
healthy_deadline = "5m"
auto_revert = false
canary = 0
}
}
job "pgadmin4" {
datacenters = ["dc1"]
type = "service"
group "pgadmin4" {
count = 1
task "pgadmin4" {
driver = "docker"
config {
image = "dpage/pgadmin4"
ports = ["ui"]
volumes = [
"local/servers.json:/servers.json",
"local/servers.passfile:/root/.pgpass"
]
}
template {
perms = "600"
change_mode = "noop"
destination = "local/servers.passfile"
data = <<EOH
postgres.service.consul:5432:postgres:postgres:postgres
EOH
}
template {
change_mode = "noop"
destination = "local/servers.json"
data = <<EOH
{
"Servers": {
"1": {
"Name": "Local Server",
"Group": "Server Group 1",
"Port": "5432",
"Username": "root",
"PassFile": "/root/.pgpass",
"Host": "postgres.service.consul",
"SSLMode": "disable",
"MaintenanceDB": "postgres"
}
}
}
EOH
}
env {
PGADMIN_DEFAULT_EMAIL="youremail#yourdomain.com"
PGADMIN_DEFAULT_PASSWORD="yoursecurepassword"
PGADMIN_LISTEN_PORT="5050"
PGADMIN_CONFIG_ENHANCED_COOKIE_PROTECTION="False"
PGADMIN_SERVER_JSON_FILE="/servers.json"
}
logs {
max_files = 5
max_file_size = 15
}
resources {
cpu = 1000
memory = 1024
}
service {
name = "pgadmin"
tags = [ "urlprefix-/pgadmin strip=/pgadmin"]
port = "ui"
check {
name = "alive"
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
}
restart {
attempts = 10
interval = "5m"
delay = "25s"
mode = "delay"
}
network {
mbits = 10
port "ui" {
static = 5050
}
}
}
update {
max_parallel = 1
min_healthy_time = "5s"
healthy_deadline = "3m"
auto_revert = false
canary = 0
}
}
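After re-running both jobs (e.g. nomad job run postgres.nomad and nomad job run pgadmin.nomad, assuming those file names), pgAdmin should be reachable at localhost:5050 as intended.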
I would like to create an index mapping through NEST, but I want to pass a raw Elasticsearch request directly:
var setting = new ConnectionSettings(new Uri("uri"));
setting.DefaultIndex(_esIndexName);
var client = new ElasticClient(setting);

string rawEsRequest = @"PUT /myindex
{
    ""mappings"": {
        ""review"": {
            ""properties"": {
                ""commentaire"": {
                    ""analyzer"": ""french"",
                    ""type"": ""text"",
                    ""fields"": {
                        ""keyword"": {
                            ""type"": ""keyword"",
                            ""ignore_above"": 256
                        }
                    }
                },
                ""date_creaation"": {
                    ""type"": ""date""
                }
            }
        }
    }
}";

// I want to do something like this:
client.Mapping.rawPut(rawEsRequest);
Do you know if it is possible to send a raw Elasticsearch request to create a mapping?
Yes, with the low-level client in Elasticsearch.Net, which is also exposed on the high-level client in NEST through the .LowLevel property. You just need to remove the HTTP verb and URI, as these are part of the method call on the client.
var pool = new SingleNodeConnectionPool(new Uri("http://localhost:9200"));
var defaultIndex = "myindex";

var connectionSettings = new ConnectionSettings(pool)
    .DefaultIndex(defaultIndex);

var client = new ElasticClient(connectionSettings);

string rawEsRequest = @"{
    ""mappings"": {
        ""review"": {
            ""properties"": {
                ""commentaire"": {
                    ""analyzer"": ""french"",
                    ""type"": ""text"",
                    ""fields"": {
                        ""keyword"": {
                            ""type"": ""keyword"",
                            ""ignore_above"": 256
                        }
                    }
                },
                ""date_creaation"": {
                    ""type"": ""date""
                }
            }
        }
    }
}";

ElasticsearchResponse<dynamic> putResponse =
    client.LowLevel.IndicesCreate<dynamic>(defaultIndex, rawEsRequest);
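putResponse carries the details of the raw call; depending on your NEST/Elasticsearch.Net version you can inspect, for example, putResponse.Success and the returned body to confirm the index and mapping were created.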
I'm not able to get TermVector results properly through SolrNet. I tried the following code:
QueryOptions options = new QueryOptions()
{
    OrderBy = new[] { new SortOrder("markupId", Order.ASC) },
    TermVector = new TermVectorParameters
    {
        Fields = new[] { "text" },
        Options = TermVectorParameterOptions.All
    }
};

var results = SolrMarkupCore.Query(query, options);

foreach (var docVectorResult in results.TermVectorResults)
{
    foreach (var vectorResult in docVectorResult.TermVector)
        System.Diagnostics.Debug.Print(vectorResult.ToString());
}
In the code above, results.TermVectorResults in the outer foreach gives the proper count, whereas docVectorResult.TermVector in the inner foreach is empty.
I copied the Solr query generated by the code above and issued it against the Solr admin, and there I get the termVectors values correctly. The actual query I issued is below:
http://localhost:8983/solr/select/?sort=markupId+asc&tv.tf=true&start=0&q=markupId:%2823%29&tv.offsets=true&tv=true&tv.positions=true&tv.fl=text&version=2.2&rows=50
First, check the HTTP query to make sure the term vector feature is set up properly.
If it's not, change your indexing based on: The Term Vector Component.
If it is OK, you can use ExtraParams and switch to the term vector request handler. Try this:
public SolrQueryExecuter<Product> instance { get; private set; }

public ICollection<TermVectorDocumentResult> resultDoc(string q)
{
    string SERVER = "http://localhost:7080/solr/core"; // change this
    var container = ServiceLocator.Current as SolrNet.Utils.Container;

    instance = new SolrQueryExecuter<Product>(
        container.GetInstance<ISolrAbstractResponseParser<Product>>(),
        new SolrConnection(SERVER),
        container.GetInstance<ISolrQuerySerializer>(),
        container.GetInstance<ISolrFacetQuerySerializer>(),
        container.GetInstance<ISolrMoreLikeThisHandlerQueryResultsParser<Product>>());

    instance.DefaultHandler = "/tvrh";

    SolrQueryResults<Product> results =
        instance.Execute(new SolrQuery(q),
            new QueryOptions
            {
                Fields = new[] { "*" },
                Start = 0,
                Rows = 10,
                ExtraParams = new Dictionary<string, string> {
                    { "tv.tf", "false" },
                    { "tv.df", "false" },
                    { "tv.positions", "true" },
                    { "tv", "true" },
                    { "tv.offsets", "false" },
                    { "tv.payloads", "true" },
                    { "tv.fl", "message" }, // change the field name here
                }
            }
        );

    return results.TermVectorResults;
}
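Calling this with the question's query, e.g. resultDoc("markupId:23") (a hypothetical call; adjust tv.fl to your field name), should now return results whose TermVector collections are populated, since the request goes to the /tvrh handler instead of the default select handler.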