Set a prefix on Spring Micrometer metrics using StatsD and Datadog

I'm trying to implement a custom metrics integration for my app, using the following setup:
// DogStatsD metrics integration with Micrometer
implementation group: 'io.micrometer', name: 'micrometer-registry-statsd', version: '1.7.2'
Custom Spring configuration added for the application:
import io.micrometer.core.aop.TimedAspect;
import io.micrometer.core.instrument.Meter;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.config.MeterFilter;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.actuate.autoconfigure.metrics.MeterRegistryCustomizer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class MetricConfiguration {

    private final MeterRegistry meterRegistry;

    @Value("${management.metrics.export.statsd.prefix}")
    private String prefix;

    @Autowired
    public MetricConfiguration(MeterRegistry meterRegistry) {
        this.meterRegistry = meterRegistry;
    }

    @Bean
    public MeterRegistryCustomizer<MeterRegistry> metricsCommonTags() {
        return registry -> registry.config().meterFilter(new MeterFilter() {
            @Override
            public Meter.Id map(Meter.Id id) {
                if (!id.getName().startsWith(prefix)) {
                    return id.withName(prefix + "." + id.getName());
                } else {
                    return id;
                }
            }
        });
    }

    @Bean
    public TimedAspect timedAspect() {
        return new TimedAspect(meterRegistry);
    }
}
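For context, the TimedAspect bean is what lets @Timed annotations on my own beans produce timer metrics (given AOP on the classpath). A minimal usage sketch, assuming a hypothetical service class and metric name purely for illustration:

import io.micrometer.core.annotation.Timed;
import org.springframework.stereotype.Service;

@Service
public class OrderService {

    // Records a timer named "orders.lookup" each time this method is called;
    // the class and metric name here are made-up examples, not part of the app above.
    @Timed(value = "orders.lookup", description = "Time spent looking up orders")
    public String findOrder(String id) {
        return "order-" + id;
    }
}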
YAML Configuration for Metrics
management:
  metrics:
    enable:
      jvm: false
      process: false
      tomcat: false
      system: false
      logback: false
    distribution:
      slo:
        http.server.requests: 50ms
      percentiles-histogram:
        http.server.requests: true
      percentiles:
        http.server.requests: 0.99
    export:
      statsd:
        enabled: false
        flavor: datadog
        host: ${DD_AGENT_HOST}
        port: 8125
        prefix: ${spring.application.name}
  endpoints:
    enabled-by-default: true
    web:
      exposure:
        include: "*"
  endpoint:
    metrics:
      enabled: true
    health:
      enabled: true
      show-components: "always"
      show-details: "always"
I'm trying to apply the prefix to all of my custom metrics, but after setting the prefix the exclusions stop working and the excluded metrics start showing up in the /actuator/metrics response.
The response looks like this:
{
  "names": [
    "my-service.http.server.requests",
    "my-service.jvm.buffer.count",
    "my-service.jvm.buffer.memory.used",
    "my-service.jvm.buffer.total.capacity",
    "my-service.jvm.classes.loaded",
    "my-service.jvm.classes.unloaded",
    "my-service.jvm.gc.live.data.size",
    "my-service.jvm.gc.max.data.size",
    "my-service.jvm.gc.memory.allocated",
    "my-service.logback.events",
    "my-service.process.cpu.usage",
    "my-service.process.files.max",
    "my-service.process.files.open",
    "my-service.process.start.time",
    "my-service.process.uptime",
    "my-service.system.cpu.count",
    "my-service.system.cpu.usage",
    "my-service.system.load.average.1m",
    "my-service.tomcat.sessions.active.current",
    "my-service.tomcat.sessions.active.max",
    "my-service.tomcat.sessions.alive.max",
    "my-service.tomcat.sessions.created",
    "my-service.tomcat.sessions.expired",
    "my-service.tomcat.sessions.rejected"
  ]
}
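For what it's worth, the likely interaction here is that the management.metrics.enable.* properties deny meters by their original name prefixes (jvm, process, tomcat, ...), while the custom MeterFilter above renames every meter, so jvm.* becomes my-service.jvm.* and no longer matches those exclusions. A minimal sketch of one way around that, assuming the prefix is only wanted on metrics the application registers itself (the list of "system" prefixes below is illustrative, not exhaustive):

import io.micrometer.core.instrument.Meter;
import io.micrometer.core.instrument.config.MeterFilter;

import java.util.List;

public class PrefixCustomMetricsFilter implements MeterFilter {

    // Hypothetical list of built-in meter name prefixes we do not want to rename,
    // so the management.metrics.enable.* exclusions keep matching them.
    private static final List<String> SYSTEM_PREFIXES =
            List.of("jvm", "process", "tomcat", "system", "logback", "http.server.requests");

    private final String prefix;

    public PrefixCustomMetricsFilter(String prefix) {
        this.prefix = prefix;
    }

    @Override
    public Meter.Id map(Meter.Id id) {
        String name = id.getName();
        boolean isSystemMetric = SYSTEM_PREFIXES.stream().anyMatch(name::startsWith);
        if (isSystemMetric || name.startsWith(prefix)) {
            return id; // leave built-in meters untouched so the deny filters still apply
        }
        return id.withName(prefix + "." + name);
    }
}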

Related

Cloud Stream not able to track the status of downstream failures

I have written the following code to leverage the Spring Cloud Stream functional approach to get events from RabbitMQ and publish them to Kafka. I am able to achieve the primary goal, with one caveat: if the Kafka broker goes down for any reason while the application is running, I get logs saying the Kafka broker is down, but at the same time I want to stop consuming events from RabbitMQ until the broker comes back up, or have those messages routed to an exchange or a DLQ topic. I have seen in many places the suggestion to use producer sync: true, but in my case that is not helping. A lot of people also mention @ServiceActivator(inputChannel = "error-topic") for an error topic when there is a failure at the target channel, but that method is never invoked either. In short, I don't want to lose messages received from RabbitMQ while Kafka is down for any reason.
application.yml
management:
  health:
    binders:
      enabled: true
    kafka:
      enabled: true
server:
  port: 8081
spring:
  rabbitmq:
    publisher-confirms: true
  kafka:
    bootstrap-servers: localhost:9092
    producer:
      properties:
        max.block.ms: 100
    admin:
      fail-fast: true
  cloud:
    function:
      definition: handle
    stream:
      bindingRetryInterval: 30
      rabbit:
        bindings:
          handle-in-0:
            consumer:
              bindingRoutingKey: MyRoutingKey
              exchangeType: topic
              requeueRejected: true
              acknowledgeMode: AUTO
              # ackMode: MANUAL
              # acknowledge-mode: MANUAL
              # republishToDlq: false
      kafka:
        binder:
          considerDownWhenAnyPartitionHasNoLeader: true
          producer:
            properties:
              max.block.ms: 100
          brokers:
            - localhost
      bindings:
        handle-in-0:
          destination: test_queue
          binder: rabbit
          group: queue
        handle-out-0:
          destination: mytopic
          producer:
            sync: true
            errorChannelEnabled: true
          binder: kafka
      binders:
        error:
          destination: myerror
        rabbit:
          type: rabbit
          environment:
            spring:
              rabbitmq:
                host: localhost
                port: 5672
                username: guest
                password: guest
                virtual-host: rahul_host
        kafka:
          type: kafka
json:
  cuttoff:
    size:
      limit: 1000
CloudStreamConfig.java
@Configuration
public class CloudStreamConfig {

    private static final Logger log = LoggerFactory.getLogger(CloudStreamConfig.class);

    @Autowired
    ChunkService chunkService;

    @Bean
    public Function<Message<RmaValues>, Collection<Message<RmaValues>>> handle() {
        return rmaValue -> {
            log.info("processor runs : message received with request id : {}", rmaValue.getPayload().getRequestId());
            ArrayList<Message<RmaValues>> msgList = new ArrayList<Message<RmaValues>>();
            try {
                List<RmaValues> dividedJson = chunkService.getDividedJson(rmaValue.getPayload());
                for (RmaValues rmaValues : dividedJson) {
                    msgList.add(MessageBuilder.withPayload(rmaValues).build());
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
            Channel channel = rmaValue.getHeaders().get(AmqpHeaders.CHANNEL, Channel.class);
            Long deliveryTag = rmaValue.getHeaders().get(AmqpHeaders.DELIVERY_TAG, Long.class);
            // try {
            //     channel.basicAck(deliveryTag, false);
            // } catch (IOException e) {
            //     e.printStackTrace();
            // }
            return msgList;
        };
    }

    @ServiceActivator(inputChannel = "error-topic")
    public void errorHandler(ErrorMessage em) {
        log.info("---------------------------------------got error message over errorChannel: {}", em);
        if (null != em.getPayload() && em.getPayload() instanceof KafkaSendFailureException) {
            KafkaSendFailureException kafkaSendFailureException = (KafkaSendFailureException) em.getPayload();
            if (kafkaSendFailureException.getRecord() != null && kafkaSendFailureException.getRecord().value() != null
                    && kafkaSendFailureException.getRecord().value() instanceof byte[]) {
                log.warn("error channel message. Payload {}", new String((byte[]) (kafkaSendFailureException.getRecord().value())));
            }
        }
    }
}
KafkaProducerConfiguration.java
@Configuration
public class KafkaProducerConfiguration {

    @Value(value = "${spring.kafka.bootstrap-servers}")
    private String bootstrapAddress;

    @Bean
    public ProducerFactory<String, Object> producerFactory() {
        Map<String, Object> configProps = new HashMap<>();
        configProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapAddress);
        configProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        configProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return new DefaultKafkaProducerFactory<>(configProps);
    }

    @Bean
    public KafkaTemplate<String, Object> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}
RmModelOutputIngestionApplication.java
@SpringBootApplication(scanBasePackages = "com.abb.rm")
public class RmModelOutputIngestionApplication {

    private static final Logger LOGGER = LogManager.getLogger(RmModelOutputIngestionApplication.class);

    public static void main(String[] args) {
        SpringApplication.run(RmModelOutputIngestionApplication.class, args);
    }

    @Bean("objectMapper")
    public ObjectMapper objectMapper() {
        ObjectMapper mapper = new ObjectMapper();
        LOGGER.info("Returning object mapper...");
        return mapper;
    }
}
First, it seems you are writing more code than you need. Why do you define an ObjectMapper? Why a KafkaTemplate? Why a ProducerFactory? These are all already provided for you by Spring Boot.
You really only need one function and possibly an error handler, depending on the error handling strategy you select, which brings me to the error handling topic. There are three primary ways of handling errors; the documentation explains them all and provides samples. Please read through it and modify your app accordingly, and if something doesn't work or is unclear, feel free to follow up.
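To illustrate the point about keeping only the function, here is a pared-down sketch of the binding. It strips the manual acknowledgement and producer plumbing; RmaValues and ChunkService are the asker's own types, and the sketch assumes getDividedJson declares no checked exceptions, letting failures propagate so the binder's retry/error handling can apply:

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.function.Function;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.messaging.Message;
import org.springframework.messaging.support.MessageBuilder;

@Configuration
public class HandleFunctionConfig {

    private final ChunkService chunkService;

    public HandleFunctionConfig(ChunkService chunkService) {
        this.chunkService = chunkService;
    }

    // Splits each incoming RabbitMQ message into chunks and lets the binder
    // publish them to the Kafka output binding; no KafkaTemplate or ProducerFactory needed.
    @Bean
    public Function<Message<RmaValues>, Collection<Message<RmaValues>>> handle() {
        return incoming -> {
            List<RmaValues> chunks = chunkService.getDividedJson(incoming.getPayload());
            Collection<Message<RmaValues>> out = new ArrayList<>();
            for (RmaValues chunk : chunks) {
                out.add(MessageBuilder.withPayload(chunk).build());
            }
            return out;
        };
    }
}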

Observing frequent set_autocommit = ? and db.SQL.innodb.rows MySQL spikes with Spring Boot and MySQL 5 in a 3-pod deployment

Observing a high number of set_autocommit = ? statements
Observing a periodic (roughly every 20 minutes) spike in RDS Performance Insights
Running versions:
id 'org.springframework.boot' version '2.5.4'
id 'io.spring.dependency-management' version '1.0.11.RELEASE'
id "com.palantir.docker" version "0.26.0"
id "com.palantir.docker-run" version "0.26.0"
id 'pl.allegro.tech.build.axion-release' version '1.13.2'
id 'com.appland.appmap' version '1.1.0'
3 pods running on AWS EKS
DB: AWS RDS 5.7.mysql_aurora.2.10.1
Here are the configurations:
application.yml
spring:
  application:
    name: xyz
  profiles:
    # The commented value for `active` can be replaced with valid Spring profiles to load.
    # Otherwise, it will be filled in by gradle when building the JAR file
    # Either way, it can be overridden by `--spring.profiles.active` value passed in the commandline or `-Dspring.profiles.active` set in `JAVA_OPTS`
    active: dev
    group:
      dev:
        - dev
        - api-docs
        # Uncomment to activate TLS for the dev profile
        #- tls
      prod:
        - prod
        - api-docs
        # Uncomment to activate TLS for the dev profile
        #- tls
      stage:
        - stage
  jmx:
    enabled: false
  data:
    web:
      pageable:
        default-page-size: 20
        max-page-size: 20
    jpa:
      repositories:
        bootstrap-mode: deferred
  jpa:
    open-in-view: false
    properties:
      hibernate.jdbc.time_zone: UTC
      hibernate.id.new_generator_mappings: true
      hibernate.connection.provider_disables_autocommit: true # https://vladmihalcea.com/why-you-should-always-use-hibernate-connection-provider_disables_autocommit-for-resource-local-jpa-transactions/
      hibernate.cache.use_second_level_cache: true
      hibernate.cache.region.factory_class: org.hibernate.cache.ehcache.EhCacheRegionFactory
      hibernate.cache.use_query_cache: true
      hibernate.javax.cache.missing_cache_strategy: create
      # modify batch size as necessary
      hibernate.jdbc.batch_size: 20
      hibernate.order_inserts: true
      hibernate.order_updates: true
      hibernate.batch_versioned_data: true
      hibernate.query.fail_on_pagination_over_collection_fetch: true
      hibernate.query.in_clause_parameter_padding: true
      hibernate.dialect: org.hibernate.dialect.MySQL5Dialect
      javax.persistent.sharedCache.mode: ENABLE_SELECTIVE
    hibernate:
      ddl-auto: none
      naming:
        physical-strategy: org.springframework.boot.orm.jpa.hibernate.SpringPhysicalNamingStrategy
        implicit-strategy: org.springframework.boot.orm.jpa.hibernate.SpringImplicitNamingStrategy
  messages:
    basename: i18n/messages
  main:
    allow-bean-definition-overriding: true
  task:
    execution:
      thread-name-prefix: xyz-task-
      pool:
        core-size: 2
        max-size: 50
        queue-capacity: 10000
    scheduling:
      thread-name-prefix: catalogue-scheduling-
      pool:
        size: 2
  thymeleaf:
    mode: HTML
  output:
    ansi:
      console-available: true
server:
  servlet:
    session:
      cookie:
        http-only: true
  tomcat:
    mbeanregistry:
      enabled: true
    threads:
      max: 100
  compression:
    enabled: true
    mime-types: "text/html,text/xml,text/plain,text/css,text/javascript,application/javascript,application/json"
    min-response-size: 1024
  port: 8080
# Properties to be exposed on the /info management endpoint
info:
  # Comma separated list of profiles that will trigger the ribbon to show
  display-ribbon-on-profiles: 'dev'
management:
  endpoints:
    web:
      exposure:
        include: "health,info,metrics,prometheus"
  endpoint:
    health:
      probes:
        enabled: true
      show-details: always
      show-components: always
application-prod.yml
logging:
  level:
    ROOT: INFO
    org.hibernate.SQL: ERROR
    com.pitstop.catalogue: INFO
    com.zaxxer.hikari: INFO
  config: classpath:logback-prod.xml
spring:
  devtools:
    restart:
      enabled: true
      additional-exclude: static/**
  jackson:
    serialization:
      indent-output: true
  datasource:
    auto-commit: false
    type: com.zaxxer.hikari.HikariDataSource
    url: ${SPRING_DATASOURCE_URL}
    username: ${SPRING_DATASOURCE_USERNAME}
    password: ${SPRING_DATASOURCE_PASSWORD}
    hikari:
      poolName: CatalogJPAHikariCP
      minimumIdle: 10
      maximumPoolSize: 120
      connectionTimeout: 30000
      idleTimeout: 600000
      maxLifetime: 1800000
      auto-commit: false
      data-source-properties:
        testWhileIdle: true
        validationQuery: SELECT 1 FROM DUAL
        cachePrepStmts: true
        prepStmtCacheSize: 250
        prepStmtCacheSqlLimit: 2048
        useServerPrepStmts: true
        useLocalSessionState: true
        rewriteBatchedStatements: true
        cacheResultSetMetadata: true
        cacheServerConfiguration: true
        maintainTimeStats: true
  servlet:
    multipart:
      location: /data/tmp
  jpa:
    hibernate:
      ddl-auto: none
    properties:
      spring.jpa.show-sql: false
      hibernate.generate_statistics: false
  liquibase:
    contexts: prod
  messages:
    cache-duration: PT1S # 1 second, see the ISO 8601 standard
  thymeleaf:
    cache: false
  sleuth:
    sampler:
      probability: 1 # report 100% of traces
Example of one entity:
@Entity
@Table(name = "product_model")
@org.hibernate.annotations.Cache(usage = CacheConcurrencyStrategy.READ_WRITE)
public class ProductModel implements Serializable {

    private static final long serialVersionUID = 1L;

    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    private Long id;

    @NotNull
    @Size(max = 50)
    @Column(name = "name", length = 50, nullable = false)
    private String name;

    @ManyToOne(optional = false, fetch = FetchType.LAZY)
    @JsonIgnoreProperties(value = {"productModels"}, allowSetters = true)
    private ProductMake productMake;

    @OneToMany(mappedBy = "productModel", fetch = FetchType.LAZY)
    @JsonIgnoreProperties(value = {"product", "productModel"}, allowSetters = true)
    @BatchSize(size = 20)
    @org.hibernate.annotations.Cache(usage = CacheConcurrencyStrategy.READ_WRITE)
    private Set<ProductModelMapping> productModelMappings = new HashSet<>();

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public String getName() {
        return this.name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public ProductMake getProductMake() {
        return this.productMake;
    }

    public void setProductMake(ProductMake productMake) {
        this.productMake = productMake;
    }

    public Set<ProductModelMapping> getProductModelMappings() {
        return this.productModelMappings;
    }

    public void setProductModelMappings(Set<ProductModelMapping> productModelMappings) {
        if (this.productModelMappings != null) {
            this.productModelMappings.forEach(i -> i.setProductModel(null));
        }
        if (productModelMappings != null) {
            productModelMappings.forEach(i -> i.setProductModel(this));
        }
        this.productModelMappings = productModelMappings;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof ProductModel)) {
            return false;
        }
        return id != null && id.equals(((ProductModel) o).id);
    }

    @Override
    public int hashCode() {
        return getClass().hashCode();
    }

    @Override
    public String toString() {
        return "ProductModel{" +
            "id=" + getId() +
            ", name='" + getName() + "'" +
            "}";
    }
}
One of the JPA repositories:
@Repository
public interface BrandRepository extends JpaRepository<Brand, Long>, JpaSpecificationExecutor<Brand> {
}
One of the service methods:
I keep @Transactional at the service class level and do not put @Transactional on individual save/update service methods.
@Transactional(readOnly = true)
public Page<BrandDto> findAll(Pageable pageable) {
    log.debug("Request to get all Brands");
    return baseService.findAndBuild(pageable, brandRepository);
}

@Override
public Page<T> findAndBuild(Pageable pageable, JpaRepository<E, K> repository) {
    final Page<E> pageEntityResponse = repository.findAll(pageable);
    return pageConverter(pageEntityResponse);
}
A couple of issues I am facing:
a lot of set_autocommit = ? statements being fired
db.SQL.innodb.rows MySQL spikes at 20-minute intervals
I see you’ve got AppMap enabled.
You may try the option appmap.recording.auto as described at https://appland.com/docs/reference/appmap-java.html.
Start your server, let it run for a bit, then shut it down, and you should see those queries being issued (by the connection pool, I suspect). You can include the package names of the connection pool, ORM, and database driver in your appmap.yml if you want extra detail.
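For reference, a minimal appmap.yml along those lines might look like the following sketch; the application name and package paths are placeholders, and the exact paths depend on the dependencies actually in use:
name: catalogue-service        # placeholder application name
packages:
  - path: com.pitstop.catalogue  # your own code
  - path: com.zaxxer.hikari      # connection pool, to capture the set autocommit calls
  - path: org.hibernate          # ORM
  - path: com.mysql.cj           # MySQL JDBC driver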
Additional help is available in Discord - https://discord.com/invite/N9VUap6

Exclude specific controllers from Sleuth/Brave tracing

We are using the Brave API in our Spring Boot application. We are able to trace all controllers and services. The issue is that we are getting traces for some controllers that we do not want to trace, such as the health check controller. Is there any way to specify this per controller, since by default it traces all controllers?
I have tried using
spring:
  application:
    name: abc
  sleuth:
    enabled: "true"
    reporter:
      enabled: "true"
    sampler:
      probability: "1.0"
    instrument:
      web:
        skipPattern: (^status* | ^Status* | *status*)
and
spring:
  application:
    name: abc
  sleuth:
    enabled: "true"
    reporter:
      enabled: "true"
    sampler:
      probability: "1.0"
    web:
      skipPattern: (^status* | ^Status* | *status*)
But it did not work. Status controller:
@RestController
public class StatusController {

    @Autowired
    public StatusController() {
    }

    @RequestMapping(value = "/status", method = RequestMethod.GET)
    public Boolean status() {
        return true;
    }
}
Please help.
Thanks
*status* is not a valid regex. Try just /status. You can check https://github.com/spring-cloud/spring-cloud-sleuth/blob/v2.1.4.RELEASE/spring-cloud-sleuth-core/src/main/java/org/springframework/cloud/sleuth/instrument/web/SleuthWebProperties.java#L34 for the default. Also, it may make more sense to use the additionalSkipPattern property to append your custom values to the existing ones.
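As a sketch of that suggestion, assuming the additionalSkipPattern property from SleuthWebProperties and that /status is the path to skip:
spring:
  sleuth:
    web:
      additionalSkipPattern: /status.*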

Spring Boot not registering on the Prometheus endpoint

I am trying to configure Prometheus and Grafana with Spring Boot.
@Configuration
@EnableSpringBootMetricsCollector
public class MetricsConfiguration {

    @Value("${spring.application.name}")
    private String applicationName;

    @Value("${spring.profiles.active}")
    private String environment;

    /**
     * Register the common tag "application" instead of "job". This application tag is
     * needed for the Grafana dashboard.
     *
     * @return registry with registered tags.
     */
    @Bean
    public MeterRegistryCustomizer<MeterRegistry> metricsCommonTags() {
        return registry -> {
            registry.config().commonTags("application", applicationName, "environment", environment)
                    .meterFilter(getDefaultConfig());
        };
    }

    private MeterFilter getDefaultConfig() {
        return new MeterFilter() {
            @Override
            public DistributionStatisticConfig configure(Meter.Id id, DistributionStatisticConfig config) {
                return DistributionStatisticConfig.builder().percentilesHistogram(true).percentiles(0.95, 0.99).build()
                        .merge(config);
            }
        };
    }
}
While running the application I am able to see the metrics at the localhost:8080/prometheus URL, but I am not able to see the same at the localhost:9090/metrics URL, which is the Prometheus URL.
I have added the configuration to prometheus.yml and restarted Prometheus.
- job_name: 'my-api'
  scrape_interval: 10s
  metrics_path: '/prometheus'
  target_groups:
    - targets: ['localhost:8080']
After spending two hours I found the solution: we were using basic auth for all endpoints, including the health ones, and the issue was that I was not setting up basic auth in my prometheus.yml:
- job_name: 'my-api'
  scrape_interval: 10s
  metrics_path: '/prometheus'
  target_groups:
    - targets: ['localhost:8080']
  basic_auth:
    username: test
    password: test

Spring Cloud Gateway API - Context-path on routes not working

I have set up the context-path in application.yml:
server:
  port: 4177
  max-http-header-size: 65536
  tomcat.accesslog:
    enabled: true
  servlet:
    context-path: /gb-integration
And I have configured some routes
@Bean
public RouteLocator routeLocator(RouteLocatorBuilder builder) {
    final String sbl = "http://localhost:4178";
    return builder.routes()
        // gb-sbl-rest
        .route("sbl", r -> r
            .path("/sbl/**")
            .filters(f -> f.rewritePath("/sbl/(?<segment>.*)", "/gb-sbl/${segment}"))
            .uri(sbl))
        .build();
}
I want the API gateway to be reached using localhost:4177/gb-integration/sbl/**
However, it only works on localhost:4177/sbl/**
It seems my context-path is ignored.
Any ideas how I can get my context-path to work on all my routes?
You probably already figured it out by yourself, but here is what is working for me:
After reading the Spring Cloud documentation and trying many things on my own, I eventually opted for a route-by-route configuration. In your case, it would look something like this:
.path("/gb-integration/sbl/**")
and repeat the same pattern for every route:
.path("/gb-integration/abc/**")
...
.path("/gb-integration/def/**")
You can actually see this in the Spring Cloud documentation.
The Spring Cloud documentation seems to be a work in progress. Hopefully, we shall find a better solution.
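Putting that route-by-route approach together with the asker's RouteLocator, a minimal sketch might look like this; the second route is a made-up example purely to show the repeated pattern, and the rewritePath filter is carried over from the question:

@Bean
public RouteLocator routeLocator(RouteLocatorBuilder builder) {
    final String sbl = "http://localhost:4178";
    return builder.routes()
        // include the would-be context path directly in each route's Path predicate
        .route("sbl", r -> r
            .path("/gb-integration/sbl/**")
            .filters(f -> f.rewritePath("/gb-integration/sbl/(?<segment>.*)", "/gb-sbl/${segment}"))
            .uri(sbl))
        // hypothetical second route repeating the same pattern
        .route("abc", r -> r
            .path("/gb-integration/abc/**")
            .filters(f -> f.rewritePath("/gb-integration/abc/(?<segment>.*)", "/gb-abc/${segment}"))
            .uri("http://localhost:4179"))
        .build();
}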
Elaborating on @sendon1982's answer:
If your service is exposed at localhost:8080/color/red and you want it to be accessible through the gateway as localhost:9090/gateway/color/red, prepend /gateway in the Path predicate and add StripPrefix=1 in the filters, which basically translates to:
take the requested path that matches Path, strip out as many leading path segments as the number given, and route to the configured uri using the stripped path.
my-app-gateway: /gateway
spring:
  cloud:
    gateway:
      routes:
        - id: color-service
          uri: http://localhost:8080
          predicates:
            - Path=${my-app-gateway}/color/**
          filters:
            - StripPrefix=1
Using a YAML file like this:
spring:
  cloud:
    gateway:
      routes:
        - id: property-search-service-route
          uri: http://localhost:4178
          predicates:
            - Path=/gb-integration/sbl/**
Fixed:
application.yaml:
gateway:
  discovery:
    locator:
      enabled: true
      lower-case-service-id: true
      filters:
        # strip the /ierp/[serviceId] prefix before forwarding
        - StripPath=2
      predicates:
        - name: Path
          # route matches /ierp/[serviceId]
          # org.springframework.cloud.gateway.discovery.DiscoveryClientRouteDefinitionLocator#getRouteDefinitions
          args[pattern]: "'/ierp/'+serviceId+'/**'"
filter:
@Component
public class StripPathGatewayFilterFactory extends
        AbstractGatewayFilterFactory<StripPathGatewayFilterFactory.Config> {

    /**
     * Parts key.
     */
    public static final String PARTS_KEY = "parts";

    public StripPathGatewayFilterFactory() {
        super(StripPathGatewayFilterFactory.Config.class);
    }

    @Override
    public List<String> shortcutFieldOrder() {
        return Arrays.asList(PARTS_KEY);
    }

    @Override
    public GatewayFilter apply(Config config) {
        return (exchange, chain) -> {
            ServerHttpRequest request = exchange.getRequest();
            ServerWebExchangeUtils.addOriginalRequestUrl(exchange, request.getURI());
            String path = request.getURI().getRawPath();
            String[] originalParts = StringUtils.tokenizeToStringArray(path, "/");
            // all new paths start with /
            StringBuilder newPath = new StringBuilder("/");
            for (int i = 0; i < originalParts.length; i++) {
                if (i >= config.getParts()) {
                    // only append slash if this is the second part or greater
                    if (newPath.length() > 1) {
                        newPath.append('/');
                    }
                    newPath.append(originalParts[i]);
                }
            }
            if (newPath.length() > 1 && path.endsWith("/")) {
                newPath.append('/');
            }
            ServerHttpRequest newRequest = request.mutate().path(newPath.toString()).contextPath(null).build();
            exchange.getAttributes().put(ServerWebExchangeUtils.GATEWAY_REQUEST_URL_ATTR, newRequest.getURI());
            return chain.filter(exchange.mutate().request(newRequest).build());
        };
    }

    public static class Config {

        private int parts;

        public int getParts() {
            return parts;
        }

        public void setParts(int parts) {
            this.parts = parts;
        }
    }
}
