Helm Override not taking in Env specific application.properties - spring-boot

I am deploying my Spring Boot application's Docker image on GCP using Helm charts. For env-specific configuration I use a helm-override.yaml file. However, I noticed that the values configured in application-stage.properties are not being picked up by the application. Attaching the Helm chart and build.gradle files below.
Below is the project structure:
```
xyz
  settings.gradle
  build.gradle
  config
    prod
      application-prod.properties
    stage
      application.properties
  gradle/
    wrapper/
      gradle-wrapper.jar
      gradle-wrapper.properties
  src/
    main/
      java/
      resources/
        application.properties
  xyzcharts/
    values.yaml
    config/
      stage/
        helm-override-stage.yaml
    templates/
      configmap.yaml
      cronjob.yaml
```
build.gradle
plugins {
    id 'org.springframework.boot' version "${springBootVersion}"
    id 'io.spring.dependency-management' version '1.0.15.RELEASE'
    id 'java'
    id 'eclipse'
    id 'jacoco'
    id 'org.sonarqube' version "3.3"
    id 'com.google.cloud.tools.jib' version "${jibVersion}"
}

group = 'com.vsi.postgrestoattentive'

if (!project.hasProperty('buildName')) {
    throw new GradleException("Usage for CLI:"
        + System.getProperty("line.separator")
        + "gradlew <taskName> -Dorg.gradle.java.home=<java-home-dir> -PbuildName=<major>.<minor>.<buildNumber> -PgcpProject=<gcloudProject>"
        + System.getProperty("line.separator")
        + "<org.gradle.java.home> - OPTIONAL if available in PATH"
        + System.getProperty("line.separator")
        + "<buildName> - MANDATORY, example 0.1.23"
        + System.getProperty("line.separator")
        + "<gcpProject> - OPTIONAL, project name in GCP")
}

project.ext {
    buildName = project.property('buildName')
}

version = "${project.ext.buildName}"
sourceCompatibility = '1.8'

apply from: 'gradle/sonar.gradle'
apply from: 'gradle/tests.gradle'
apply from: 'gradle/image-build-gcp.gradle'

repositories {
    mavenCentral()
}

dependencies {
    implementation("org.springframework.boot:spring-boot-starter-web:${springBootVersion}")
    implementation("org.springframework.boot:spring-boot-starter-actuator:${springBootVersion}")
    implementation 'org.springframework.boot:spring-boot-starter-web:2.7.0'
    developmentOnly 'org.springframework.boot:spring-boot-devtools'
    testImplementation 'org.springframework.boot:spring-boot-starter-test'
    testImplementation 'org.springframework.integration:spring-integration-test'
    testImplementation 'org.springframework.batch:spring-batch-test:4.3.0'
    implementation("org.springframework.boot:spring-boot-starter-data-jpa:${springBootVersion}")
    implementation 'org.postgresql:postgresql:42.1.4'
    implementation 'org.springframework.batch:spring-batch-core:4.1.1.RELEASE'
    implementation 'com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.14.1'
    implementation group: 'io.micrometer', name: 'micrometer-registry-datadog', version: '1.7.0'
    implementation 'com.google.cloud:libraries-bom:26.3.0'
    implementation 'com.google.cloud:google-cloud-storage:2.16.0'
}

bootJar {
    archiveFileName = "${project.name}.${archiveExtension.get()}"
}

springBoot {
    buildInfo()
}

test {
    finalizedBy jacocoTestReport
}

jacoco {
    toolVersion = "0.8.8"
}

jacocoTestReport {
    dependsOn test
}

//SMS2-28: Code to make build check code coverage ratio
project.tasks["bootJar"].dependsOn "jacocoTestReport", "jacocoTestCoverageVerification"
cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: {{ include "xyz.fullname" . }}
  labels:
    {{ include "xyz.labels" . | nindent 4 }}
spec:
  schedule: "{{ .Values.schedule }}"
  concurrencyPolicy: Forbid
  successfulJobsHistoryLimit: 5
  failedJobsHistoryLimit: 2
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: Never
          containers:
            - name: {{ .Chart.Name }}
              image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
              imagePullPolicy: {{ .Values.image.pullPolicy }}
              env:
                - name: POSTGRES_DB_USER_NAME
                  valueFrom:
                    secretKeyRef:
                      name: xyz-feed-secret
                      key: DB_USER_NAME
                - name: POSTGRES_DB_PASSWORD
                  valueFrom:
                    secretKeyRef:
                      name: xyz-feed-secret
                      key: DB_PASSWORD
                - name: POSTGRES_DB_URL
                  valueFrom:
                    secretKeyRef:
                      name: xyz-feed-secret
                      key: DB_URL
                - name: POSTGRES_TO_ATTENTIVE_TOKEN
                  valueFrom:
                    secretKeyRef:
                      name: xyz-feed-secret
                      key: ATTENTIVE_TOKEN
                - name: POD_NAME
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.name
                - name: DD_AGENT_HOST
                  valueFrom:
                    fieldRef:
                      fieldPath: status.hostIP
                - name: DD_ENV
                  value: {{ .Values.datadog.env }}
                - name: DD_SERVICE
                  value: {{ include "xyz.name" . }}
                - name: DD_VERSION
                  value: {{ include "xyz.AppVersion" . }}
                - name: DD_LOGS_INJECTION
                  value: "true"
                - name: DD_RUNTIME_METRICS_ENABLED
                  value: "true"
              volumeMounts:
                - mountPath: /app/config
                  name: logback
              ports:
                - name: http
                  containerPort: {{ .Values.service.port }}
                  protocol: TCP
          volumes:
            - configMap:
                name: {{ include "xyz.name" . }}
              name: logback
      backoffLimit: 0
    metadata:
      {{ with .Values.podAnnotations }}
      annotations:
        {{ toYaml . | nindent 8 }}
      labels:
        {{ include "xyz.selectorLabels" . | nindent 8 }}
      {{- end }}
configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "xyz.name" . }}
  labels:
    {{- include "xyz.labels" . | nindent 4 }}
data:
  application.properties: |-
    {{- range .Files.Lines .Values.application.configoveride }}
    {{ . }}{{ end }}
  logback-spring.xml: |+
    <?xml version="1.0" encoding="UTF-8"?>
    <configuration>
      <include resource="org/springframework/boot/logging/logback/defaults.xml"/>
      <include resource="org/springframework/boot/logging/logback/console-appender.xml" />
      <include resource="org/springframework/cloud/gcp/logging/logback-json-appender.xml" />
      <property name="projectId" value="${projectId:-${GOOGLE_CLOUD_PROJECT}}"/>
      <appender name="CONSOLE_JSON" class="ch.qos.logback.core.ConsoleAppender">
        <encoder class="ch.qos.logback.core.encoder.LayoutWrappingEncoder">
          <layout class="org.springframework.cloud.gcp.logging.StackdriverJsonLayout">
            <projectId>${projectId}</projectId>
            <includeTraceId>true</includeTraceId>
            <includeSpanId>true</includeSpanId>
            <includeLevel>true</includeLevel>
            <includeThreadName>true</includeThreadName>
            <includeMDC>true</includeMDC>
            <includeLoggerName>true</includeLoggerName>
            <includeFormattedMessage>true</includeFormattedMessage>
            <includeExceptionInMessage>false</includeExceptionInMessage>
            <includeContextName>true</includeContextName>
            <includeMessage>true</includeMessage>
            <includeException>true</includeException>
            <jsonFormatter class="ch.qos.logback.contrib.jackson.JacksonJsonFormatter">
            </jsonFormatter>
          </layout>
        </encoder>
      </appender>
      <root level="INFO">
        <appender-ref ref="CONSOLE_JSON"/>
      </root>
    </configuration>
values.yaml
# Default values for postgres_to_attentive_product_catalog.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

###SMS2-40 - replica count indicates the no of instances we need
### - If we want 3 instances then we mention 3 - then 3 pods will be created on the server
### - For the staging env we usually keep 1
replicaCount: 1

image:
  ###SMS2-40 - Below is the image name which is created during build-->GCP Build image
  ### --->We can also give local image details here
  ### --->We can create an image in a Docker repository and use that image URL here
  repository: gcr.io/mgcp-1308657-vsi-operations/smscatalogfeed
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: "smscatalogfeed"

podAnnotations: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

schedule: "56 17 * * *"

###SMS2-40 - There are 2 ways we can serve our applications-->1st->LoadBalancer or 2nd->NodePort
service:
  type: NodePort
  port: 8087
  liveness: /actuator/health/liveness
  readiness: /actuator/health/readiness
###service:
###  type: ClusterIP
###  port: 80

restartPolicy: "Never"

ingress:
  enabled: false
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

nodeSelector: {}

tolerations: []

affinity: {}

###SMS2-40 - Below is used to override configuration with config/application.properties
application:
  configoveride: "config/application.properties"
helm-override-stage.yaml
replicaCount: 1
#SMS2-12 : mgcp-1308657-vsi-operations is our server/project in GCP
image:
  repository: gcr.io/mgcp-1308657-vsi-operations/smscatalogfeed
  tag: <IMAGE_TAG_PLACEHOLDER_TO_BE_REPLACED>
application:
  configoveride: "config/stage/application-stage.properties"
datadog:
  enabled: true
  env: stage

It looks like there are several problems with your project structure and your YAML configs:

- The application-stage.properties file is missing.
- Other chart files such as Chart.yaml and _helpers.tpl are not reflected in your project structure.
- A secret.yaml is missing from your chart templates, but your CronJob jobTemplate fetches env values from that secret with secretKeyRef.
- Your application-stage.properties content is only stored via configmap.yaml.
- Your configmap.yaml data is defined with the filename as the key and the file content as the value, which is not suited for exposing as container environment variables.

As to the question of why the values configured in application-stage.properties are not being picked up by the application: you defined your CronJob container to fetch env values from a secret which is missing, and your application-stage.properties content only exists in a ConfigMap as a file, not as key-value pairs.
The tutorial spring boot application.properties in kubernetes may provide you with more guidelines.
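For illustration, here is a minimal sketch of one way to get an environment-specific properties file from a ConfigMap into the Spring Boot container: mount the ConfigMap as a file and point Spring Boot at the mounted directory. The ConfigMap name xyz-stage-config, the volume name app-config, and the sample property are assumptions for the sketch, not values taken from the chart above.
```
apiVersion: v1
kind: ConfigMap
metadata:
  name: xyz-stage-config            # hypothetical name
data:
  application.properties: |-
    # contents of config/stage/application-stage.properties would go here
    feed.page-size=100              # sample property, not from the question
---
# Relevant fragment of the CronJob pod template (sketch):
spec:
  containers:
    - name: xyz
      image: gcr.io/mgcp-1308657-vsi-operations/smscatalogfeed:0.1.23
      env:
        # Tell Spring Boot to also read property files from the mounted directory
        - name: SPRING_CONFIG_ADDITIONAL_LOCATION
          value: "file:/app/config/"
      volumeMounts:
        - name: app-config
          mountPath: /app/config
  volumes:
    - name: app-config
      configMap:
        name: xyz-stage-config
```
With that wiring in place, pointing configoveride in helm-override-stage.yaml at a properties file that actually exists inside the chart directory is what makes the ConfigMap content change per environment.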

Related

Find and update key values in a file using ansible role

I need to update the key values using regex and replace, i.e. all the user input, username and password, using a separate new task.
inventory/main.yml
service1: abc.com/s1
service2: def.com/s2
username: user
password: pass
values.yml
---
service1:
  image:
    name: <user input> # service1 image
service2:
  config:
    range: 127.0.0.0
  image:
    name: <user input> # service2 image
id:
  username: # DB username
  password: # DB Password
Expectation:
values.yml
---
service1:
  image:
    name: abc.com/s1 # service1 image
service2:
  config:
    range: 127.0.0.0
  image:
    name: def.com/s2 # service2 image
id:
  username: user # DB username
  password: pass # DB Password
I should create an update_values.yml task which finds the values.yml path and replaces the required values.
My attempt at update_values.yml:
---
- name: update value
  replace:
    path: <path>/values.yml
    regexp: "service1.image.name: <user input>"
    replace: "abc.com/s1"
In Ansible, this is a typical use-case of a template. Create the template
shell> cat values.yml.j2
---
service1:
  image:
    name: {{ service1 }} # service1 image
service2:
  config:
    range: 127.0.0.0
  image:
    name: {{ service2 }} # service2 image
id:
  username: {{ username }} # DB username
  password: {{ password }} # DB Password
Then, the tasks below
- include_vars: inventory/main.yml
- template:
    src: values.yml.j2
    dest: values.yml
create the file
shell> cat values.yml
---
service1:
  image:
    name: abc.com/s1 # service1 image
service2:
  config:
    range: 127.0.0.0
  image:
    name: def.com/s2 # service2 image
id:
  username: user # DB username
  password: pass # DB Password
Q: "Is there any other method using regex and replace?"
A: You can use lineinfile if you want to. For example,
- lineinfile:
    path: values.yml
    regexp: "{{ item.regexp }}"
    line: "{{ item.line }}"
    backrefs: true
  loop:
    - {regexp: '^(\s*)name: <user input>(.*)$', line: '\1name: {{ service2 }}\2'}
    - {regexp: '^(\s*)name: <user input>(.*)$', line: '\1name: {{ service1 }}\2'}
    - {regexp: '^(\s*)username:\s+.*?(\s+#.*)$', line: '\1username: {{ username }}\2'}
    - {regexp: '^(\s*)password:\s+.*?(\s+#.*)$', line: '\1password: {{ password }}\2'}
change the file
shell> cat values.yml
---
service1:
  image:
    name: abc.com/s1 # service1 image
service2:
  config:
    range: 127.0.0.0
  image:
    name: def.com/s2 # service2 image
id:
  username: user # DB username
  password: pass # DB Password
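For completeness, a minimal playbook sketch showing how the variables file and the template task above could be wired together (the localhost play target and the file layout are assumptions):
```
- hosts: localhost
  gather_facts: false
  tasks:
    # Load service1, service2, username and password from the vars file
    - include_vars: inventory/main.yml

    # Render values.yml from the Jinja2 template shown above
    - template:
        src: values.yml.j2
        dest: values.yml
```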

How to add springboot application monitoring to prometheus in Kubernetes?

Good afternoon everyone, I have a question about adding monitoring of the application itself to Prometheus.
I am using Spring Boot Actuator and can see the values for Prometheus at https://example.com/actuator/prometheus.
I have deployed Prometheus via the default Helm chart (helm -n monitor upgrade -f values.yaml pg prometheus-community/kube-prometheus-stack) and added these values for it:
additionalScrapeConfigs:
  job_name: prometheus
  scrape_interval: 40s
  scrape_timeout: 40s
  metrics_path: /actuator/prometheus
  scheme: https
Prometheus itself can be found at http://ex.com/prometheus
The deployment.yaml file of my springboot application is as follows:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app: backend
  template:
    metadata:
      labels:
        app: backend
      annotations:
        prometheus.io/path: /actuator/prometheus
        prometheus.io/scrape: "true"
        prometheus.io/port: "8080"
    spec:
      containers:
        - env:
            - name: DATABASE_PASSWORD
              value: {{ .Values.DATABASE_PASSWORD }}
            - name: DATASOURCE_USERNAME
              value: {{ .Values.DATASOURCE_USERNAME }}
            - name: DATASOURCE_URL
              value: jdbc:postgresql://database-postgresql:5432/dev-school
          name: {{ .Release.Name }}
          image: {{ .Values.container.image }}
          ports:
            - containerPort: 8080
However, after that prometheus still can't see my values.
Can you tell me what the error could be?
In prometheus-operator, additionalScrapeConfigs is not used in this way.
According to the documentation on Additional Scrape Configuration:
AdditionalScrapeConfigs allows specifying a key of a Secret containing additional Prometheus scrape configurations.
The easiest way to add a new scrape config is to use a ServiceMonitor, like the example below:
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: example-app
  labels:
    team: frontend
spec:
  selector:
    matchLabels:
      app: backend
  endpoints:
    - port: web
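Keep in mind that a ServiceMonitor selects Services by label, not pods, and the port in endpoints must match a named port on that Service. A minimal Service sketch that the ServiceMonitor above could match (the names and labels are assumptions):
```
apiVersion: v1
kind: Service
metadata:
  name: backend
  labels:
    app: backend              # matched by the ServiceMonitor selector
spec:
  selector:
    app: backend              # must match the pod labels from the Deployment
  ports:
    - name: web               # referenced by the ServiceMonitor endpoint "port: web"
      port: 8080
      targetPort: 8080
```
For a Spring Boot app you would typically also set path: /actuator/prometheus on the ServiceMonitor endpoint, since the default scrape path is /metrics.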

DB Credentials Exposed as part job parameters when executing a task from SCDF

I have a custom-built SCDF which is built as a Docker image in OpenShift and referenced in server-deployment.yaml as the Docker image. I use an Oracle DB to store the task metadata; it is an external datasource here. I pass all the DB properties in a ConfigMap. The DB password is base64 encoded and added to the ConfigMap as a secret. These DB details are used by SCDF to store task metadata.
These job parameters are passed by SCDF to the executing job. But these job parameters, which are in turn the datasource properties from the ConfigMap including the DB password, are being printed in the logs as job parameters and stored in the batch_job_execution_params table.
I thought using the password as a secret in the ConfigMap would resolve this, but it does not. Below are the log and table snippets of the job parameters being printed.
I would like to know how to avoid passing these DB properties as job parameters to the executing job, so that the credentials are not exposed.
12-06-2020 18:12:38.540 [main] INFO org.springframework.batch.core.launch.support.SimpleJobLauncher.run - Job:
[FlowJob: [name=Job]] launched with the following parameters: [{
-spring.cloud.task.executionid=8010,
-spring.cloud.data.flow.platformname=default,
-spring.datasource.username=ACTUAL_USERNAME,
-spring.cloud.task.name=Alljobs,
Job.ID=1591985558466,
-spring.datasource.password=ACTUAL_PASSWORD,
-spring.datasource.driverClassName=oracle.jdbc.OracleDriver,
-spring.datasource.url=DATASOURCE_URL,
-spring.batch.job.names=Job_1}]
Pod Created for the Job execution - openshift screenshot
Database Table
Custom SCDF Dockerfile.yaml
===========================
FROM maven:3.5.2-jdk-8-alpine AS MAVEN_BUILD
COPY pom.xml /build/
COPY src /build/src/
WORKDIR /build/
RUN mvn package
FROM openjdk:8-jre-alpine
WORKDIR /app
COPY --from=MAVEN_BUILD /build/target/BatchAdmin-0.0.1-SNAPSHOT.jar /app/
ENTRYPOINT ["java", "-jar", "BatchAdmin-0.0.1-SNAPSHOT.jar"]
Deployment.yaml
===============
apiVersion: apps/v1
kind: Deployment
metadata:
  name: scdf-server
  labels:
    app: scdf-server
spec:
  selector:
    matchLabels:
      app: scdf-server
  replicas: 1
  template:
    metadata:
      labels:
        app: scdf-server
    spec:
      containers:
        - name: scdf-server
          image: docker-registry.default.svc:5000/batchadmin/scdf-server #DockerImage
          imagePullPolicy: Always
          volumeMounts:
            - name: config
              mountPath: /config
              readOnly: true
          ports:
            - containerPort: 80
          livenessProbe:
            httpGet:
              path: /management/health
              port: 80
            initialDelaySeconds: 45
          readinessProbe:
            httpGet:
              path: /management/info
              port: 80
            initialDelaySeconds: 45
          resources:
            limits:
              cpu: 1.0
              memory: 2048Mi
            requests:
              cpu: 0.5
              memory: 1024Mi
          env:
            - name: KUBERNETES_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: "metadata.namespace"
            - name: SERVER_PORT
              value: '80'
            - name: SPRING_CLOUD_CONFIG_ENABLED
              value: 'false'
            - name: SPRING_CLOUD_DATAFLOW_FEATURES_ANALYTICS_ENABLED
              value: 'true'
            - name: SPRING_CLOUD_DATAFLOW_FEATURES_SCHEDULES_ENABLED
              value: 'true'
            - name: SPRING_CLOUD_DATAFLOW_TASK_COMPOSED_TASK_RUNNER_URI
              value: 'docker://springcloud/spring-cloud-dataflow-composed-task-runner:2.6.0.BUILD-SNAPSHOT'
            - name: SPRING_CLOUD_KUBERNETES_CONFIG_ENABLE_API
              value: 'true'
            - name: SPRING_CLOUD_KUBERNETES_SECRETS_ENABLE_API
              value: 'true'
            - name: SPRING_CLOUD_KUBERNETES_SECRETS_PATHS
              value: /etc/secrets
            - name: SPRING_CLOUD_DATAFLOW_FEATURES_TASKS_ENABLED
              value: 'true'
            - name: SPRING_CLOUD_KUBERNETES_CONFIG_NAME
              value: scdf-server
            - name: SPRING_CLOUD_DATAFLOW_SERVER_URI
              value: 'http://${SCDF_SERVER_SERVICE_HOST}:${SCDF_SERVER_SERVICE_PORT}'
            # Add Maven repo for metadata artifact resolution for all stream apps
            - name: SPRING_APPLICATION_JSON
              value: "{ \"maven\": { \"local-repository\": null, \"remote-repositories\": { \"repo1\": { \"url\": \"https://repo.spring.io/libs-snapshot\"} } } }"
      serviceAccountName: scdf-sa
      volumes:
        - name: config
          configMap:
            name: scdf-server
            items:
              - key: application.yaml
                path: application.yaml
      #- name: SPRING_CLOUD_DATAFLOW_FEATURES_TASKS_ENABLED
      #  value: 'true'
server-config.yaml
==================
apiVersion: v1
kind: ConfigMap
metadata:
  name: scdf-server
  labels:
    app: scdf-server
data:
  application.yaml: |-
    spring:
      cloud:
        dataflow:
          task:
            platform:
              kubernetes:
                accounts:
                  default:
                    limits:
                      memory: 1024Mi
                      cpu: 2
                    entry-point-style: exec
                    image-pull-policy: always
      datasource:
        url: jdbc:oracle:thin:#db_url
        username: BATCH_APP
        password: ${oracle-root-password}
        driver-class-name: oracle.jdbc.OracleDriver
        testOnBorrow: true
        validationQuery: "SELECT 1"
      flyway:
        enabled: false
      jpa:
        hibernate:
          use-new-id-generator-mappings: true
oracle-secrets.yaml
===================
apiVersion: v1
kind: Secret
metadata:
  name: oracle
  labels:
    app: oracle
data:
  oracle-root-password: a2xldT3ederhgyzFCajE4YQ==
Any help would be much appreciated. Thanks.
In SCDF v2.6.2 the team fixed this issue: the DB credentials are no longer exposed in the logs, the pod description page, or the database. By default the credentials are still visible, so anyone who has this issue has to add the following environment variable to the deployment configuration and set its value to true.
SPRING_CLOUD_DATAFLOW_TASK_USE_KUBERNETES_SECRETS_FOR_DB_CREDENTIALS = true
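In the server-deployment.yaml shown above, that would be one more entry in the scdf-server container's env list, along these lines (a sketch; the rest of the list stays as it is):
```
env:
  # ... existing entries unchanged ...
  - name: SPRING_CLOUD_DATAFLOW_TASK_USE_KUBERNETES_SECRETS_FOR_DB_CREDENTIALS
    value: 'true'
```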
This is not a perfect solution, but you can mask the password in the log by using features of Logback (the default logging library used by Spring Boot).
Put the configuration below into your Logback config; it replaces the password with ****.
<springProfile name="local">
  <include resource="org/springframework/boot/logging/logback/console-appender.xml"/>
  <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
    <encoder>
      <pattern>
        %d{dd-MM-yyyy HH:mm:ss.SSS} [%thread] %-5level %logger{36}.%M - %replace(%msg){'password=\S*', 'password=****'}%n
      </pattern>
    </encoder>
  </appender>
  <root level="info">
    <appender-ref ref="console"/>
  </root>
</springProfile>
For the password logged in the database, it looks like we are out of luck. The best thing we can do is to put those SCDF tables in a separate schema and restrict the permissions needed to access them.

server-deployment.yml not reading values from server-config.yml in Spring Cloud Data flow server

I have deployed the custom-built SCDF 2.5.2 in an OpenShift environment and it is up and running successfully. I followed the guide 2.5.0.RELEASE_Guide. The issue is that the properties given in server-config are not being considered by the server-deployment.yaml file when I mount them. Although I can see the mapping for application.yaml in the deployment configuration, the properties are not read while the server is starting.
So when I build the custom SCDF I have to add all the server properties, including the Kubernetes memory limits and the external Oracle datasource properties, to the SCDF project's application.properties file. Only then are the Kubernetes properties read, the platform set up, and the external Oracle datasource connected. Below are the files that I'm using. I'm new to SCDF and Kubernetes, so please let me know if I'm missing anything anywhere.
The reason why I added the Kubernetes properties to the application.properties of the custom SCDF project is explained in this question.
server-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: scdf-server
  labels:
    app: scdf-server
data:
  application.yaml: |-
    spring:
      cloud:
        dataflow:
          task:
            platform:
              kubernetes:
                accounts:
                  default:
                    limits:
                      memory: 1024Mi
      datasource:
        url: jdbc:oracle:thin:#hostname:port/db
        username: root
        password: oracle-root-password
        driver-class-name: oracle.jdbc.OracleDriver
        testOnBorrow: true
        validationQuery: "SELECT 1"
server-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: scdf-server
  labels:
    app: scdf-server
spec:
  selector:
    matchLabels:
      app: scdf-server
  replicas: 1
  template:
    metadata:
      labels:
        app: scdf-server
    spec:
      containers:
        - name: scdf-server
          image: docker-registry.default.svc:5000/batchadmin/scdf-server
          imagePullPolicy: Always
          volumeMounts:
            - name: config
              mountPath: /config
              readOnly: true
          ports:
            - containerPort: 80
          livenessProbe:
            httpGet:
              path: /management/health
              port: 80
            initialDelaySeconds: 45
          readinessProbe:
            httpGet:
              path: /management/info
              port: 80
            initialDelaySeconds: 45
          resources:
            limits:
              cpu: 1.0
              memory: 2048Mi
            requests:
              cpu: 0.5
              memory: 1024Mi
          env:
            - name: KUBERNETES_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: "metadata.namespace"
            - name: SERVER_PORT
              value: '80'
            - name: SPRING_CLOUD_CONFIG_ENABLED
              value: 'false'
            - name: SPRING_CLOUD_DATAFLOW_FEATURES_ANALYTICS_ENABLED
              value: 'true'
            - name: SPRING_CLOUD_DATAFLOW_FEATURES_SCHEDULES_ENABLED
              value: 'true'
            - name: SPRING_CLOUD_DATAFLOW_TASK_COMPOSED_TASK_RUNNER_URI
              value: 'docker://springcloud/spring-cloud-dataflow-composed-task-runner:2.6.0.BUILD-SNAPSHOT'
            - name: SPRING_CLOUD_KUBERNETES_CONFIG_ENABLE_API
              value: 'false'
            - name: SPRING_CLOUD_KUBERNETES_SECRETS_ENABLE_API
              value: 'false'
            - name: SPRING_CLOUD_KUBERNETES_SECRETS_PATHS
              value: /etc/secrets
            - name: SPRING_CLOUD_DATAFLOW_FEATURES_TASKS_ENABLED
              value: 'true'
            - name: SPRING_CLOUD_DATAFLOW_SERVER_URI
              value: 'http://${SCDF_SERVER_SERVICE_HOST}:${SCDF_SERVER_SERVICE_PORT}'
            # Add Maven repo for metadata artifact resolution for all stream apps
            - name: SPRING_APPLICATION_JSON
              value: "{ \"maven\": { \"local-repository\": null, \"remote-repositories\": { \"repo1\": { \"url\": \"https://repo.spring.io/libs-snapshot\"} } } }"
      serviceAccountName: scdf-sa
      volumes:
        - name: config
          configMap:
            name: scdf-server
            items:
              - key: application.yaml
                path: application.yaml
application.properties - the only thing that runs the SCDF right now:
spring.application.name=batchadmin
spring.datasource.url=jdbc:oracle:thin:#hostname:port/db
spring.datasource.username=root
spring.datasource.password=oracle_root_password
spring.datasource.driver-class-name=oracle.jdbc.OracleDriver
spring.cloud.dataflow.task.platform.kubernetes.accounts.default.image-pull-policy= always
spring.cloud.dataflow.task.platform.kubernetes.accounts.default.entry-point-style= exec
spring.cloud.dataflow.task.platform.kubernetes.accounts.default.limits.cpu=2
spring.cloud.dataflow.task.platform.kubernetes.accounts.default.limits.memory=1024Mi
spring.flyway.enabled=false
spring.jpa.show-sql=true
spring.jpa.hibernate.use-new-id-generator-mappings=true
logging.level.root=info
logging.file.max-size=5GB
logging.file.max-history=30
logging.pattern.console=%d{dd-MM-yyyy HH:mm:ss.SSS} [%thread] %-5level %logger.%M - %msg%n
My main concern here, apart from the above issue, is the DB password. Since SCDF passes all the application.properties related to the datasource and Kubernetes as job parameters, including the DB password, the password is printed in the logs, visible in the running pod config, and stored in batch_job_execution_params.
Application.properties as Job params
To summarize the issues here as questions:
Why are the server-config.yaml properties not being used by server-deployment.yaml? What went wrong?
Since I pass the server properties from the application.properties file, all the properties are visible in the logs as well as in the DB. Is there a way I could hide them?
Thanks in advance.
server-role
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: scdf-role
rules:
  - apiGroups: [""]
    resources: ["services", "pods", "replicationcontrollers", "persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "create", "delete", "update"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["apps"]
    resources: ["statefulsets", "deployments", "replicasets"]
    verbs: ["get", "list", "watch", "create", "delete", "update", "patch"]
  - apiGroups: ["extensions"]
    resources: ["deployments", "replicasets"]
    verbs: ["get", "list", "watch", "create", "delete", "update", "patch"]
  - apiGroups: ["batch"]
    resources: ["cronjobs", "jobs"]
    verbs: ["create", "delete", "get", "list", "watch", "update", "patch"]

How can I edit path.data and path.log for elasticsearch on Kubernetes?

I made an es-deploy.yml file and set the path.log and path.data values.
After creating the pod, I checked that directory and there was nothing there. The setting did not work!
How can I edit path.data and path.log for Elasticsearch on Kubernetes?
I also tried using PATH_DATA.
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: es
  labels:
    component: elasticsearch
spec:
  replicas: 1
  template:
    metadata:
      labels:
        component: elasticsearch
    spec:
      serviceAccount: elasticsearch
      initContainers:
        - name: init-sysctl
          image: busybox
          imagePullPolicy: IfNotPresent
          command: ["sysctl", "-w", "vm.max_map_count=262144"]
          securityContext:
            privileged: true
      containers:
        - name: es
          securityContext:
            capabilities:
              add:
                - IPC_LOCK
          image: docker.elastic.co/elasticsearch/elasticsearch:7.3.0
          env:
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: "CLUSTER_NAME"
              value: "myesdb"
            - name: "DISCOVERY_SERVICE"
              value: "elasticsearch"
            - name: NODE_MASTER
              value: "true"
            - name: NODE_DATA
              value: "true"
            - name: HTTP_ENABLE
              value: "true"
            - name: ES_JAVA_OPTS
              value: "-Xms256m -Xmx256m"
            - name: "path.data"
              value: "/data/elk/data"
            - name: "path.logs"
              value: "/data/elk/log"
          ports:
            - containerPort: 9200
              name: http
              protocol: TCP
            - containerPort: 9300
              name: transport
              protocol: TCP
          volumeMounts:
            - mountPath: /data/elk/
Those values, path.data and path.logs, are not environment variables. They are config options.
The default path.data for the official Elasticsearch image is /usr/share/elasticsearch/data, based on the default value of ES_HOME=/usr/share/elasticsearch/. If you don't want to use that path you have to override it in the elasticsearch.yml config.
You will have to create a ConfigMap containing your elasticsearch.yml with something like this:
apiVersion: v1
kind: ConfigMap
metadata:
  name: elasticsearch-config
  namespace: es
data:
  elasticsearch.yml: |
    cluster:
      name: ${CLUSTER_NAME:elasticsearch-default}
    node:
      master: ${NODE_MASTER:true}
      data: ${NODE_DATA:true}
      name: ${NODE_NAME}
      ingest: ${NODE_INGEST:true}
      max_local_storage_nodes: ${MAX_LOCAL_STORAGE_NODES:1}
    processors: ${PROCESSORS:1}
    network.host: ${NETWORK_HOST:_site_}
    path:
      data: ${DATA_PATH:"/data/elk"}
      repo: ${REPO_LOCATIONS:[]}
    bootstrap:
      memory_lock: ${MEMORY_LOCK:false}
    http:
      enabled: ${HTTP_ENABLE:true}
      compression: true
      cors:
        enabled: true
        allow-origin: "*"
    discovery:
      zen:
        ping.unicast.hosts: ${DISCOVERY_SERVICE:elasticsearch-discovery}
        minimum_master_nodes: ${NUMBER_OF_MASTERS:1}
    xpack:
      license.self_generated.type: basic
(Note that the above ConfigMap will also allow you to use the DATA_PATH environment variable)
Then mount your volumes in your Pod with something like this:
volumeMounts:
  - name: storage
    mountPath: /data/elk
  - name: config-volume
    mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
    subPath: elasticsearch.yml
volumes:
  - name: config-volume
    configMap:
      name: elasticsearch-config
  - name: storage
    <add-whatever-volume-you-are-using-for-data>
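For instance, the storage placeholder above could be an emptyDir for quick testing or a PersistentVolumeClaim for real data; both variants below are assumptions, not part of the original answer:
```
# Option 1: throwaway storage, fine for experiments only
- name: storage
  emptyDir: {}

# Option 2: persistent storage backed by a hypothetical PVC named "es-data"
# - name: storage
#   persistentVolumeClaim:
#     claimName: es-data
```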
