Spring Kafka no message received

I'm using Spring Boot 2.2.4.RELEASE and spring-kafka 2.4.2.RELEASE.
My scenario is the following:
In my microservice (let's call it the producer microservice) I need to create a Kafka topic and then, under some circumstances, send a message over that single topic.
This message must be received and handled by another microservice (let's call it the consumer microservice). In this consumer microservice I must create a Kafka listener every time a new topic is created on the server side.
So I wrote the following code.
producer microservice
spring kafka config:
@Configuration
public class WebmailKafkaConfig {
@Autowired
private Environment environment;
@Bean
public KafkaAdmin kafkaAdmin(){
Map<String, Object> configuration = new HashMap<String, Object>();
configuration.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, environment.getProperty("webmail.be.messaging.kafka.bootstrap.address"));
KafkaAdmin result = new KafkaAdmin(configuration);
return result;
}
@Bean
public ProducerFactory<String, RicezioneMailMessage> producerFactory() {
Map<String, Object> configProps = new HashMap<>();
configProps.put( ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, environment.getProperty("webmail.be.messaging.kafka.bootstrap.address"));
configProps.put( ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
//configProps.put( ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
configProps.put( ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
return new DefaultKafkaProducerFactory<>(configProps);
}
#Bean("ricezioneMailMessageKafkaTemplate")
public KafkaTemplate<String, RicezioneMailMessage> ricezioneMailMessageKafkaTemplate() {
return new KafkaTemplate<>(producerFactory());
}
}
spring service kafka manager
@Service
public class WebmailKafkaTopicSvcImpl implements WebmailKafkaTopicSvc {
private static final Logger logger = LoggerFactory.getLogger(WebmailKafkaTopicSvcImpl.class.getName());
@Autowired
private KafkaAdmin kafkaAdmin;
#Value("${webmail.be.messaging.kafka.topic.numero.partizioni}")
private int numeroPartizioni;
#Value("${webmail.be.messaging.kafka.topic.fattore.replica}")
private short fattoreReplica;
@Autowired
@Qualifier("ricezioneMailMessageKafkaTemplate")
private KafkaTemplate<String, RicezioneMailMessage> ricezioneMailMessageKafkaTemplate;
@Override
public void createKafkaTopic(String topicName) throws Exception {
if(!StringUtils.hasText(topicName)){
throw new IllegalArgumentException("Passato un topic name non valido ["+topicName+"]");
}
AdminClient adminClient = null;
try{
adminClient = AdminClient.create(kafkaAdmin.getConfig());
List<NewTopic> topics = new ArrayList<>(1);
NewTopic topic = new NewTopic(topicName, numeroPartizioni, fattoreReplica);
topics.add(topic);
CreateTopicsResult result = adminClient.createTopics(topics);
result.all().whenComplete(new KafkaFuture.BiConsumer<Void, Throwable>() {
@Override
public void accept(Void aVoid, Throwable throwable) {
if( throwable != null ){
logger.error("Errore creazione topic", throwable);
}
}
});
}finally {
if( adminClient != null ){
adminClient.close();
}
}
}
@Override
public void sendMessage(RicezioneMailMessage rmm) throws Exception {
ListenableFuture<SendResult<String, RicezioneMailMessage>> future = ricezioneMailMessageKafkaTemplate.send(rmm.getPk(), rmm);
future.addCallback(new ListenableFutureCallback<SendResult<String, RicezioneMailMessage>>() {
@Override
public void onFailure(Throwable ex) {
if( logger.isWarnEnabled() ){
logger.warn("Impossibile inviare il messaggio=["
+ rmm + "] a causa di : " + ex.getMessage(),ex);
}
}
@Override
public void onSuccess(SendResult<String, RicezioneMailMessage> result) {
if(logger.isTraceEnabled()){
logger.trace("Inviato messaggio=[" + rmm +
"] con offset=[" + result.getRecordMetadata().offset() + "]");
}
}
});
}
}
On the producer side everything works pretty well: I'm able to create topics and send messages.
consumer microservice
dynamic listener class
public class DynamicKafkaConsumer {
private final String brokerAddress;
private final String topicName;
private boolean stopTest;
private static final Logger logger = LoggerFactory.getLogger(DynamicKafkaConsumer.class.getName());
public DynamicKafkaConsumer(String brokerAddress, String topicName) {
if( !StringUtils.hasText(brokerAddress)){
throw new IllegalArgumentException("Passato un broker address non valido");
}
if( !StringUtils.hasText(topicName)){
throw new IllegalArgumentException("Passato un topicName non valido");
}
this.brokerAddress = brokerAddress;
this.topicName = topicName;
if( logger.isTraceEnabled() ){
logger.trace("Creato {} con topicName {} e brokerAddress {}", this.getClass().getName(), this.topicName, this.brokerAddress);
}
}
public final void start() {
MessageListener<String, RicezioneMailMessage> messageListener = (record -> {
RicezioneMailMessage messaggioRicevuto = record.value();
if( logger.isInfoEnabled() ){
logger.info("Ricevuto messaggio {} su topic {}", messaggioRicevuto, topicName);
}
stopTest = true;
});
ConcurrentMessageListenerContainer<String, RicezioneMailMessage> container =
new ConcurrentMessageListenerContainer<>(
consumerFactory(brokerAddress),
containerProperties(topicName, messageListener));
container.start();
}
private DefaultKafkaConsumerFactory<String, RicezioneMailMessage> consumerFactory(String brokerAddress) {
return new DefaultKafkaConsumerFactory<>(
consumerConfig(brokerAddress),
new StringDeserializer(),
new JsonDeserializer<>(RicezioneMailMessage.class));
}
private ContainerProperties containerProperties(String topic, MessageListener<String, RicezioneMailMessage> messageListener) {
ContainerProperties containerProperties = new ContainerProperties(topic);
containerProperties.setMessageListener(messageListener);
return containerProperties;
}
private Map<String, Object> consumerConfig(String brokerAddress) {
return Map.of(
BOOTSTRAP_SERVERS_CONFIG, brokerAddress,
GROUP_ID_CONFIG, "groupId",
AUTO_OFFSET_RESET_CONFIG, "earliest",
ALLOW_AUTO_CREATE_TOPICS_CONFIG, "false"
);
}
public boolean isStopTest() {
return stopTest;
}
public void setStopTest(boolean stopTest) {
this.stopTest = stopTest;
}
}
simple unit test
public class TestRicezioneMessaggiCasellaPostale {
private static final Logger logger = LoggerFactory.getLogger(TestRicezioneMessaggiCasellaPostale.class.getName());
@Test
public void testRicezioneMessaggiMail() {
try {
String brokerAddress = "localhost:9092";
DynamicKafkaConsumer consumer = new DynamicKafkaConsumer(brokerAddress, "f586caf2-ffdc-4e3a-88b9-a262a502f8ac");
consumer.start();
boolean stopTest = consumer.isStopTest();
while (!stopTest) {
stopTest = consumer.isStopTest();
}
} catch (Exception e) {
logger.error("Errore nella configurazione della casella postale; {}", e.getMessage(), e);
}
}
}
On the consumer side I can't read any messages; note that the topic "f586caf2-ffdc-4e3a-88b9-a262a502f8ac" exists and it's the same topic used on the producer side.
When I send a message on the producer side I can see this log:
2020-02-19 22:00:22,320 52822 [kafka-producer-network-thread | producer-1] TRACE i.e.t.r.p.w.b.s.i.WebmailKafkaTopicSvcImpl - Inviato messaggio=[RicezioneMailMessage{pk='c5c8f8a4-8ddd-407a-9e51-f6b14d84f304', tipoMessaggio='mail'}] con offset=[0]
On the consumer side I don't see any messages; I just see the following log output:
2020-02-19 22:00:03,194 1442 [main] INFO o.a.k.c.consumer.ConsumerConfig - ConsumerConfig values:
allow.auto.create.topics = false
auto.commit.interval.ms = 5000
auto.offset.reset = earliest
bootstrap.servers = [localhost:9092]
check.crcs = true
client.dns.lookup = default
client.id =
client.rack =
connections.max.idle.ms = 540000
default.api.timeout.ms = 60000
enable.auto.commit = false
exclude.internal.topics = true
fetch.max.bytes = 52428800
fetch.max.wait.ms = 500
fetch.min.bytes = 1
group.id = groupId
group.instance.id = null
heartbeat.interval.ms = 3000
interceptor.classes = []
internal.leave.group.on.close = true
isolation.level = read_uncommitted
key.deserializer = class org.apache.kafka.common.serialization.StringDeserializer
max.partition.fetch.bytes = 1048576
max.poll.interval.ms = 300000
max.poll.records = 500
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]
receive.buffer.bytes = 65536
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retry.backoff.ms = 100
sasl.client.callback.handler.class = null
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.mechanism = GSSAPI
security.protocol = PLAINTEXT
security.providers = null
send.buffer.bytes = 131072
session.timeout.ms = 10000
ssl.cipher.suites = null
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = https
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
value.deserializer = class org.springframework.kafka.support.serializer.JsonDeserializer
2020-02-19 22:00:03,630 1878 [main] INFO o.a.k.common.utils.AppInfoParser - Kafka version: 2.4.0
2020-02-19 22:00:03,630 1878 [main] INFO o.a.k.common.utils.AppInfoParser - Kafka commitId: 77a89fcf8d7fa018
2020-02-19 22:00:03,630 1878 [main] INFO o.a.k.common.utils.AppInfoParser - Kafka startTimeMs: 1582146003626
2020-02-19 22:00:03,636 1884 [main] INFO o.a.k.c.consumer.KafkaConsumer - [Consumer clientId=consumer-groupId-1, groupId=groupId] Subscribed to topic(s): f586caf2-ffdc-4e3a-88b9-a262a502f8ac
2020-02-19 22:00:03,645 1893 [main] INFO o.s.s.c.ThreadPoolTaskScheduler - Initializing ExecutorService
2020-02-19 22:00:03,667 1915 [consumer-0-C-1] DEBUG o.s.k.l.KafkaMessageListenerContainer$ListenerConsumer - Commit list: {}
2020-02-19 22:00:04,123 2371 [consumer-0-C-1] INFO org.apache.kafka.clients.Metadata - [Consumer clientId=consumer-groupId-1, groupId=groupId] Cluster ID: hOOJH-WNTNiXD4il0Y7_0Q
2020-02-19 22:00:05,052 3300 [consumer-0-C-1] INFO o.a.k.c.c.i.AbstractCoordinator - [Consumer clientId=consumer-groupId-1, groupId=groupId] Discovered group coordinator localhost:9092 (id: 2147483647 rack: null)
2020-02-19 22:00:05,059 3307 [consumer-0-C-1] INFO o.a.k.c.c.i.AbstractCoordinator - [Consumer clientId=consumer-groupId-1, groupId=groupId] (Re-)joining group
2020-02-19 22:00:05,116 3364 [consumer-0-C-1] INFO o.a.k.c.c.i.AbstractCoordinator - [Consumer clientId=consumer-groupId-1, groupId=groupId] (Re-)joining group
2020-02-19 22:00:05,154 3402 [consumer-0-C-1] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-groupId-1, groupId=groupId] Finished assignment for group at generation 1: {consumer-groupId-1-41df9153-7c33-46b1-8274-2d7ee2bfb35c=org.apache.kafka.clients.consumer.ConsumerPartitionAssignor$Assignment@a95df1b}
2020-02-19 22:00:05,327 3575 [consumer-0-C-1] INFO o.a.k.c.c.i.AbstractCoordinator - [Consumer clientId=consumer-groupId-1, groupId=groupId] Successfully joined group with generation 1
2020-02-19 22:00:05,335 3583 [consumer-0-C-1] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-groupId-1, groupId=groupId] Adding newly assigned partitions: f586caf2-ffdc-4e3a-88b9-a262a502f8ac-0
2020-02-19 22:00:05,363 3611 [consumer-0-C-1] INFO o.a.k.c.c.i.ConsumerCoordinator - [Consumer clientId=consumer-groupId-1, groupId=groupId] Found no committed offset for partition f586caf2-ffdc-4e3a-88b9-a262a502f8ac-0
2020-02-19 22:00:05,401 3649 [consumer-0-C-1] INFO o.a.k.c.c.i.SubscriptionState - [Consumer clientId=consumer-groupId-1, groupId=groupId] Resetting offset for partition f586caf2-ffdc-4e3a-88b9-a262a502f8ac-0 to offset 0.
2020-02-19 22:00:05,404 3652 [consumer-0-C-1] DEBUG o.s.k.l.KafkaMessageListenerContainer$ListenerConsumer - Committing on assignment: {f586caf2-ffdc-4e3a-88b9-a262a502f8ac-0=OffsetAndMetadata{offset=0, leaderEpoch=null, metadata=''}}
2020-02-19 22:00:05,432 3680 [consumer-0-C-1] INFO o.s.k.l.ConcurrentMessageListenerContainer - groupId: partitions assigned: [f586caf2-ffdc-4e3a-88b9-a262a502f8ac-0]
2020-02-19 22:00:08,669 6917 [consumer-0-C-1] DEBUG o.s.k.l.KafkaMessageListenerContainer$ListenerConsumer - Received: 0 records
2020-02-19 22:00:08,670 6918 [consumer-0-C-1] DEBUG o.s.k.l.KafkaMessageListenerContainer$ListenerConsumer - Commit list: {}
2020-02-19 22:00:13,671 11919 [consumer-0-C-1] DEBUG o.s.k.l.KafkaMessageListenerContainer$ListenerConsumer - Received: 0 records
2020-02-19 22:00:13,671 11919 [consumer-0-C-1] DEBUG o.s.k.l.KafkaMessageListenerContainer$ListenerConsumer - Commit list: {}
2020-02-19 22:00:18,673 16921 [consumer-0-C-1] DEBUG o.s.k.l.KafkaMessageListenerContainer$ListenerConsumer - Received: 0 records
2020-02-19 22:00:18,673 16921 [consumer-0-C-1] DEBUG o.s.k.l.KafkaMessageListenerContainer$ListenerConsumer - Commit list: {}
2020-02-19 22:00:23,674 21922 [consumer-0-C-1] DEBUG o.s.k.l.KafkaMessageListenerContainer$ListenerConsumer - Received: 0 records
2020-02-19 22:00:23,674 21922 [consumer-0-C-1] DEBUG o.s.k.l.KafkaMessageListenerContainer$ListenerConsumer - Commit list: {}
2020-02-19 22:00:28,676 26924 [consumer-0-C-1] DEBUG o.s.k.l.KafkaMessageListenerContainer$ListenerConsumer - Received: 0 records
2020-02-19 22:00:28,676 26924 [consumer-0-C-1] DEBUG o.s.k.l.KafkaMessageListenerContainer$ListenerConsumer - Commit list: {}
2020-02-19 22:00:33,677 31925 [consumer-0-C-1] DEBUG o.s.k.l.KafkaMessageListenerContainer$ListenerConsumer - Received: 0 records
2020-02-19 22:00:33,677 31925 [consumer-0-C-1] DEBUG o.s.k.l.KafkaMessageListenerContainer$ListenerConsumer - Commit list: {}
2020-02-19 22:00:38,678 36926 [consumer-0-C-1] DEBUG o.s.k.l.KafkaMessageListenerContainer$ListenerConsumer - Received: 0 records
2020-02-19 22:00:38,678 36926 [consumer-0-C-1] DEBUG o.s.k.l.KafkaMessageListenerContainer$ListenerConsumer - Commit list: {}
2020-02-19 22:00:43,678 41926 [consumer-0-C-1] DEBUG o.s.k.l.KafkaMessageListenerContainer$ListenerConsumer - Received: 0 records
2020-02-19 22:00:43,679 41927 [consumer-0-C-1] DEBUG o.s.k.l.KafkaMessageListenerContainer$ListenerConsumer - Commit list: {}
Can anybody tell me where I'm wrong?
Thank you
Angelo

I found the root cause in your code.
Code that sends the message, and the corresponding log, from the producer side:
ricezioneMailMessageKafkaTemplate.send(rmm.getPk(), rmm);
Inviato messaggio=[RicezioneMailMessage{pk='c5c8f8a4-8ddd-407a-9e51-f6b14d84f304', tipoMessaggio='mail'}] con offset=[0]
Code and log from consumer:
DynamicKafkaConsumer consumer = new DynamicKafkaConsumer(brokerAddress, "f586caf2-ffdc-4e3a-88b9-a262a502f8ac");
2020-02-19 22:00:03,636 1884 [main] INFO o.a.k.c.consumer.KafkaConsumer - [Consumer clientId=consumer-groupId-1, groupId=groupId] Subscribed to topic(s): f586caf2-ffdc-4e3a-88b9-a262a502f8ac
You are sending to topic: c5c8f8a4-8ddd-407a-9e51-f6b14d84f304 (the send(rmm.getPk(), rmm) call uses the message pk as the topic name, because the two-argument overload is send(topic, data)).
You are listening on topic: f586caf2-ffdc-4e3a-88b9-a262a502f8ac
The producer and consumer are sending/listening on different topics.
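A minimal sketch of a possible fix on the producer side, assuming the intended topic name can be passed into sendMessage (the topicName parameter is an assumption here; the original method only receives the message itself):
// Hypothetical variant of sendMessage: the topic is passed explicitly and the pk
// is used as the record key instead of being mistaken for the topic name.
public void sendMessage(String topicName, RicezioneMailMessage rmm) {
    ListenableFuture<SendResult<String, RicezioneMailMessage>> future =
            ricezioneMailMessageKafkaTemplate.send(topicName, rmm.getPk(), rmm);
    future.addCallback(
            result -> logger.trace("Sent [{}] to topic [{}] with offset [{}]",
                    rmm, topicName, result.getRecordMetadata().offset()),
            ex -> logger.warn("Failed to send [{}]: {}", rmm, ex.getMessage(), ex));
}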

Related

Kafka disconnection with log Node 1 disconnected

When using Kafka to communicate between services, I get the following logs. After that, events are not received by the consumer, or the producer is not able to send events:
2023-02-01 03:42:40.614 INFO 3524 --- [ad | producer-1] org.apache.kafka.clients.NetworkClient : [Producer clientId=producer-1] Node 1 disconnected.
2023-02-01 03:47:41.228 INFO 3524 --- [ad | producer-1] org.apache.kafka.clients.NetworkClient : [Producer clientId=producer-1] Node 0 disconnected.
2023-02-01 03:47:41.228 INFO 3524 --- [ad | producer-1] org.apache.kafka.clients.NetworkClient : [Producer clientId=producer-1] Cancelled in-flight METADATA request with correlation id 617 due to node 0 being disconnected (elapsed time since creation: 108ms, elapsed time since send: 108ms, request timeout: 2000ms)
2023-02-01 05:02:48.399 INFO 3524 --- [ad | producer-1] org.apache.kafka.clients.NetworkClient : [Producer clientId=producer-1] Node 2 disconnected.
Configuration looks like this:
package com.bbh.ilab.aip.presentationservice.config
import com.bbh.ilab.aip.commons.kafka.BaseEvent
import com.bbh.ilab.aip.commons.kafka.EventPayload
import org.apache.kafka.clients.producer.ProducerConfig
import org.apache.kafka.common.config.SslConfigs
import org.apache.kafka.common.serialization.StringSerializer
import org.springframework.context.annotation.Bean
import org.springframework.context.annotation.Configuration
import org.springframework.kafka.core.DefaultKafkaProducerFactory
import org.springframework.kafka.core.KafkaTemplate
import org.springframework.kafka.core.ProducerFactory
import org.springframework.kafka.support.serializer.JsonSerializer
@Configuration
class KafkaProducerConfig(
private val environmentProperties: EnvironmentProperties
) {
@Bean
fun producerFactory(): ProducerFactory<String, BaseEvent<EventPayload>> {
val configProps = getStandardConfig()
if (environmentProperties.kafka.sslEnabled) {
addSslConfig(configProps)
}
return DefaultKafkaProducerFactory(configProps)
}
@Bean
fun kafkaTemplate(): KafkaTemplate<String, BaseEvent<EventPayload>> {
return KafkaTemplate(producerFactory())
}
private fun getStandardConfig(): MutableMap<String, Any> {
val configProps: MutableMap<String, Any> = HashMap()
configProps[ProducerConfig.BOOTSTRAP_SERVERS_CONFIG] = environmentProperties.kafka.bootstrapUrl
configProps[ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG] = StringSerializer::class.java
configProps[ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG] = JsonSerializer::class.java
configProps[ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG] = environmentProperties.kafka.retry.deliveryTimeoutMs
configProps[ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG] = environmentProperties.kafka.retry.requestTimeoutMs
configProps[ProducerConfig.RETRY_BACKOFF_MS_CONFIG] = environmentProperties.kafka.retry.retryBackoffMs
return configProps
}
private fun addSslConfig(configProps: MutableMap<String, Any>) {
configProps["security.protocol"] = "SSL"
configProps[SslConfigs.SSL_KEYSTORE_TYPE_CONFIG] = "PKCS12"
configProps[SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG] = "PKCS12"
configProps[SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG] = environmentProperties.kafka.keystorePath
configProps[SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG] = environmentProperties.kafka.keystorePass
configProps[SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG] = environmentProperties.kafka.truststorePath
configProps[SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG] = environmentProperties.kafka.truststorePass
configProps[SslConfigs.SSL_KEY_PASSWORD_CONFIG] = environmentProperties.kafka.keystorePass
}
}
Events are sent by this code:
fun sendSplitEvent(configDto: ConfigDto, domain: Domain, timestamp: LocalDateTime) {
val payload = SplitColumnRequestPayload(
configDto.projectId,
domain,
configDto.targetColumn,
configDto.split
)
val event = SplitRequestEvent(UUID.randomUUID(), timestamp, payload)
kafkaTemplate.send(environmentProperties.kafka.topic, event as BaseEvent<EventPayload>)
}
I tried changing the configuration following https://github.com/strimzi/strimzi-kafka-operator/issues/2729 but it's still not working. Please help me :)

Spring boot REST service Kafka topic commitSync failing

I have a simple Spring Boot service that is called on demand and consumes a specified number of messages from the topic. The number of messages to consume is passed as a parameter. The service is called every 30 minutes. Each message is ~1.6 KB in size. I always get around 1100 or 1200 messages every time. There is one topic with one partition only, and the REST service is the only consumer. Here is how the service is called: http://example.com/messages?limit=2000
private OutputResponse getNewMessages(String limit) throws Exception {
System.out.println("***** START *****");
final long start = System.nanoTime();
int loopCntr = 0;
int counter = 0;
OutputResponse outputResponse = new OutputResponse();
Output output = new Output();
List<Output> rspListObject = new ArrayList<>();
Consumer<Object, Object> consumer = null;
String result = null;
try {
Properties p = new Properties();
p.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "180000");
p.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, limit);
consumer = consumerFactory.createConsumer("my-group-id", null, null, p);
consumer.assign(Collections.singleton(new TopicPartition("test-topic", 0)));
while (loopCntr < 2) {
loopCntr++;
ConsumerRecords<Object, Object> consumerRecords = consumer.poll(Duration.ofSeconds(15));
for (ConsumerRecord<Object, Object> record : consumerRecords)
{
counter++;
try
{
//get json string
result = mapper.writeValueAsString(record.value());
//to json
output = mapper.readValue(result, Output.class);
rspListObject.add(output);
} catch (Exception e) {
logger.error(e);
insertToDB(record.value(),record.offset());
}
}
}
outputResponse.setObjects(rspListObject);
final long end = System.nanoTime();
System.out.println("Took: " + ((end - start) / 1000000) + "ms");
System.out.println("Took: " + (end - start) / 1000000000 + " seconds");
// commit the offset of records to broker
if (counter > 0) {
consumer.commitSync();
}
} finally {
try {
System.out.println(" >>>>> closing the consumer");
if (consumer != null)
consumer.close();
}catch(Exception e){
//log message
}
}
return outputResponse;
}
This is what I have in application.yml:
spring:
  kafka:
    consumer:
      enable-auto-commit: false
      auto-offset-reset: latest
      key-deserializer: org.springframework.kafka.support.serializer.ErrorHandlingDeserializer
      value-deserializer: org.springframework.kafka.support.serializer.ErrorHandlingDeserializer
      properties:
        spring.deserializer.key.delegate.class: org.apache.kafka.common.serialization.StringDeserializer
        spring.deserializer.value.delegate.class: org.springframework.kafka.support.serializer.JsonDeserializer
        spring.json.trusted.packages: '*'
        max.poll.interval.ms: 300000
      group-id: my-group-id
ConsumerConfig values:
allow.auto.create.topics = true
auto.commit.interval.ms = 5000
auto.offset.reset = latest
check.crcs = true
client.dns.lookup = default
client.id =
client.rack =
connections.max.idle.ms = 540000
default.api.timeout.ms = 60000
enable.auto.commit = false
exclude.internal.topics = true
fetch.max.bytes = 52428800
fetch.max.wait.ms = 500
fetch.min.bytes = 1
group.id = my-group-id
group.instance.id = null
heartbeat.interval.ms = 3000
interceptor.classes = []
internal.leave.group.on.close = true
isolation.level = read_uncommitted
key.deserializer = class org.springframework.kafka.support.serializer.ErrorHandlingDeserializer
max.partition.fetch.bytes = 1048576
max.poll.interval.ms = 180000
max.poll.records = 500
metadata.max.age.ms = 300000
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
partition.assignment.strategy = [class org.apache.kafka.clients.consumer.RangeAssignor]
receive.buffer.bytes = 65536
reconnect.backoff.max.ms = 1000
reconnect.backoff.ms = 50
request.timeout.ms = 30000
retry.backoff.ms = 100
This is the error I am getting at commitSync(). I tried consuming only 5 messages per poll(), and tried setting p.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "180000");, but I get the same error:
Commit cannot be completed since the group has already rebalanced and
assigned the partitions to another member. This means that the time
between subsequent calls to poll() was longer than the configured
max.poll.interval.ms, which typically implies that the poll loop is
spending too much time message processing. You can address this either
by increasing max.poll.interval.ms or by reducing the maximum size of
batches returned in poll() with max.poll.records.
I believe that this application simulates your use case but it doesn't exhibit the behavior you describe (as I expected). You should never see a rebalance when manually assigning the topic/partition.
I suggest you run both and compare DEBUG logs to figure out what's wrong.
@SpringBootApplication
public class So63713473Application {
public static void main(String[] args) {
SpringApplication.run(So63713473Application.class, args);
}
@Bean
public NewTopic topic() {
return TopicBuilder.name("so63713473").partitions(1).replicas(1).build();
}
@Bean
public ApplicationRunner runner(ConsumerFactory<String, String> factory, KafkaTemplate<String, String> template) {
String msg = new String(new byte[1600]);
return args -> {
while (true) {
System.out.println("Hit enter to run a consumer");
System.in.read();
int count = 0;
try (Consumer<String, String> consumer = factory.createConsumer("so63713473", "")) {
IntStream.range(0, 1200).forEach(i -> template.send("so63713473", msg));
consumer.assign(Collections.singletonList(new TopicPartition("so63713473", 0)));
while (count < 1200) {
ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(5));
count += records.count();
System.out.println("Count=" + count);
}
consumer.commitSync();
System.out.println("Success");
}
}
};
}
}
spring.kafka.consumer.auto-offset-reset=earliest
spring.kafka.consumer.fetch-min-size=1920000
spring.kafka.consumer.fetch-max-wait=1000
spring.kafka.producer.properties.linger.ms=50
EDIT
I can reproduce your issue by adding a second (auto-assigned) consumer in the same group.
@KafkaListener(id = "so63713473", topics = "so63713473")
public void listen(String in) {
System.out.println(in);
}
2020-09-08 16:40:15.828 ERROR 88813 --- [ main] o.s.boot.SpringApplication : Application run failed
java.lang.IllegalStateException: Failed to execute ApplicationRunner
at org.springframework.boot.SpringApplication.callRunner(SpringApplication.java:789) [spring-boot-2.3.3.RELEASE.jar:2.3.3.RELEASE]
at org.springframework.boot.SpringApplication.callRunners(SpringApplication.java:776) [spring-boot-2.3.3.RELEASE.jar:2.3.3.RELEASE]
at org.springframework.boot.SpringApplication.run(SpringApplication.java:322) [spring-boot-2.3.3.RELEASE.jar:2.3.3.RELEASE]
at org.springframework.boot.SpringApplication.run(SpringApplication.java:1237) [spring-boot-2.3.3.RELEASE.jar:2.3.3.RELEASE]
at org.springframework.boot.SpringApplication.run(SpringApplication.java:1226) [spring-boot-2.3.3.RELEASE.jar:2.3.3.RELEASE]
at com.example.demo.So63713473Application.main(So63713473Application.java:25) [classes/:na]
Caused by: org.apache.kafka.clients.consumer.CommitFailedException: Commit cannot be completed since the group has already rebalanced and assigned the partitions to another member. This means that the time between subsequent calls to poll() was longer than the configured max.poll.interval.ms, which typically implies that the poll loop is spending too much time message processing. You can address this either by increasing max.poll.interval.ms or by reducing the maximum size of batches returned in poll() with max.poll.records.
You can't mix manual and auto assignment in the same group.
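As an aside, a minimal sketch of one way to keep the two consumers from clashing, assuming the auto-assigned listener doesn't actually need to share the group (the group id "so63713473-listener" is made up for illustration):
// Hypothetical: put the auto-assigned listener in its own consumer group so it
// no longer triggers rebalances against the manually assigned consumer.
@KafkaListener(id = "so63713473-listener", groupId = "so63713473-listener", topics = "so63713473")
public void listen(String in) {
    System.out.println(in);
}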

Remove file from remote using streaming inbound channel adapter spring boot implementation

I am trying to remove a file from the remote host using the streaming inbound channel adapter, but the connection is closing before the advice chain runs.
CODE:
@Bean
public SessionFactory<LsEntry> sftpSessionFactory() {
DefaultSftpSessionFactory factory = new DefaultSftpSessionFactory(true);
factory.setHost(sftpHost);
factory.setPort(sftpPort);
factory.setUser(sftpUser);
factory.setPassword(sftpPwd);
factory.setAllowUnknownKeys(true);
return new CachingSessionFactory<LsEntry>(factory);
}
@Bean
@InboundChannelAdapter(channel = "stream", poller = @Poller(cron = "2 * * * * ?"))
public MessageSource<InputStream> sftpMessageSource() {
SftpStreamingMessageSource messageSource = new SftpStreamingMessageSource(template());
messageSource.setRemoteDirectory(remoteDirecotry);
messageSource.setFilter(new AcceptAllFileListFilter<>());
return messageSource;
}
@Bean
public SftpRemoteFileTemplate template() {
return new SftpRemoteFileTemplate(sftpSessionFactory());
}
@Bean
@Transformer(inputChannel = "stream", outputChannel = "data")
public org.springframework.integration.transformer.Transformer transformer() {
return new StreamTransformer("UTF-8");
}
@ServiceActivator(inputChannel = "data", adviceChain = "afterChain")
@Bean
public MessageHandler handler() {
return new MessageHandler() {
@Override
public void handleMessage(Message<?> message) throws MessagingException {
String fileName = message.getHeaders().get("file_remoteFile").toString();
if (!StringUtils.isEmpty(message.toString())) {
// process the streamed file content for fileName here (body omitted in the original question)
}
else{
log.info("No file found in the Remote location");
}
}
};
}
@Bean
public ExpressionEvaluatingRequestHandlerAdvice afterChain() {
ExpressionEvaluatingRequestHandlerAdvice advice = new ExpressionEvaluatingRequestHandlerAdvice();
advice.setOnSuccessExpression(
"#template.remove(headers['file_remoteDirectory'] + headers['file_remoteFile'])");
//advice.setOnSuccessExpressionString("#template.remove(headers['file_remoteFile'])");
advice.setPropagateEvaluationFailures(true);
return advice;
}
Wherever I search, everyone suggests implementing ExpressionEvaluatingRequestHandlerAdvice, but it throws the error below:
2018-03-27 12:32:02.618 INFO 23216 --- [ask-scheduler-1] o.s.b.c.l.support.SimpleJobLauncher : Job: [FlowJob: [name=starsBatchJob]] completed with the following parameters: [{JobID=1522168322277}] and the following status: [COMPLETED]
2018-03-27 12:32:02.618 INFO 23216 --- [ask-scheduler-1] c.f.u.config.ParentBatchConfiguration : Job Status Completed
2018-03-27 12:32:02.618 INFO 23216 --- [ask-scheduler-1] c.f.u.config.ParentBatchConfiguration : Total time tokk for Stars Batch execution: 0 seconds.
2018-03-27 12:32:02.618 INFO 23216 --- [ask-scheduler-1] c.f.u.config.ParentBatchConfiguration : Batch Job lock is released
2018-03-27 12:32:02.633 INFO 23216 --- [ask-scheduler-1] com.jcraft.jsch : Disconnecting from hpchd1e.hpc.ford.com port 22
2018-03-27 12:32:02.633 ERROR 23216 --- [ask-scheduler-1] o.s.integration.handler.LoggingHandler : org.springframework.messaging.MessagingException: Dispatcher failed to deliver Message; nested exception is org.springframework.messaging.MessagingException: Failed to execute on session; nested exception is org.springframework.core.NestedIOException: Failed to remove file: 2: No such file; nested exception is 2
I had this problem. My path to the remote file was incorrect; I needed a trailing /. It is a little difficult to see since the path is being built inside a SpEL expression. You can see the path using the following in the handleMessage() method:
String remoteDirectory = (String) message.getHeaders().get("file_remoteDirectory");
String remoteFile = (String) message.getHeaders().get("file_remoteFile");
I did have to use the advice.setOnSuccessExpressionString("@template.remove(headers['file_remoteFile'])"); that is commented out above, instead of advice.setOnSuccessExpression("@template.remove(headers['file_remoteDirectory'] + headers['file_remoteFile'])");
It is incorrect in the documentation (https://docs.spring.io/spring-integration/reference/html/sftp.html#sftp-streaming), which is why I believe people who struggle with this lose faith in the docs. But this seems to be the only error.
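For reference, a minimal sketch of the advice bean as it ended up working for me, using the string expression form mentioned above (same bean and header names as in the question):
@Bean
public ExpressionEvaluatingRequestHandlerAdvice afterChain() {
    ExpressionEvaluatingRequestHandlerAdvice advice = new ExpressionEvaluatingRequestHandlerAdvice();
    // String form of the SpEL expression; @template refers to the SftpRemoteFileTemplate bean.
    advice.setOnSuccessExpressionString("@template.remove(headers['file_remoteFile'])");
    advice.setPropagateEvaluationFailures(true);
    return advice;
}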

How to close Kafka ProducerConfig values

2018-03-22 11:50:29.175 INFO 12071 --- [io-26681-exec-1] o.a.k.clients.producer.ProducerConfig : ProducerConfig values:
acks = 1
batch.size = 16384
bootstrap.servers = [ns014:9092]
buffer.memory = 33554432
client.id =
compression.type = none
Kafka in Spring Boot 2.0: when I send a message, the ProducerConfig values shown above always appear. How can I stop them from appearing?
I haven't found any configuration change that controls it.
If you are using KafkaProducer directly, you can close the producer as below:
Producer<String, String> producer = new KafkaProducer<>(props);
producer.send(new ProducerRecord<String, String>("my-topic",
Integer.toString("any Data"), Integer.toString(i)));
producer.close();
If you are using KafkaTemplate with async calls, you can do it as:
ListenableFuture<SendResult<String, Payload>> sr = kafkaTemplate.send(topic, payload);
kafkaTemplate.flush();
This will flush any pending records for that particular Kafka producer.

My TCP client using Spring Integration is not able to get a response

I have created a TCP client using Spring Integration and I am able to receive a response for my sent message. But when I use LocalDateTime.now() to log the time, I am no longer able to receive the response for the sent message. I know this can be solved by making the thread wait, but as I am new to Spring Integration, kindly help me with how to do it.
@Configuration
@ComponentScan
@EnableAutoConfiguration
public class Test
{
protected final Log logger = LogFactory.getLog(this.getClass());
// **************** Client **********************************************
@Bean
public MessageChannel replyChannel()
{
return new DirectChannel();
}
@Bean
public MessageChannel sendChannel()
{
MessageChannel directChannel = new DirectChannel();
return directChannel;
}
@EnableIntegration
@IntegrationComponentScan
@Configuration
public static class config
{
@MessagingGateway(defaultRequestChannel = "sendChannel", defaultReplyChannel = "replyChannel")
public interface Gateway
{
String Send(String in);
}
}
@Bean
AbstractClientConnectionFactory tcpNetClientConnectionFactory()
{
AbstractClientConnectionFactory tcpNetClientConnectionFactory = new TcpNetClientConnectionFactory("localhost",
9999);
tcpNetClientConnectionFactory.setSerializer(new UCCXImprovedSerializer());
tcpNetClientConnectionFactory.setDeserializer(new UCCXImprovedSerializer());
tcpNetClientConnectionFactory.setSingleUse(true);
tcpNetClientConnectionFactory.setMapper(new TcpMessageMapper());
return tcpNetClientConnectionFactory;
}
@Bean
@ServiceActivator(inputChannel = "sendChannel")
TcpOutboundGateway tcpOutboundGateway()
{
TcpOutboundGateway tcpOutboundGateway = new TcpOutboundGateway();
tcpOutboundGateway.setConnectionFactory(tcpNetClientConnectionFactory());
tcpOutboundGateway.setReplyChannel(replyChannel());
return tcpOutboundGateway;
}
public static void main(String args[])
{
// new LegaServer();
ConfigurableApplicationContext applicationContext = SpringApplication.run(Test.class, args);
String temp = applicationContext.getBean(Gateway.class).Send("kksingh");
System.out.println(LocalDateTime.now()+"output" + temp);
applicationContext.stop();
}
}
My custom serializer and deserializer class, UCCXImprovedSerializer, after updating as per @Gary's suggestion:
public class UCCXImprovedSerializer implements Serializer<String>, Deserializer<String>
{
@Override
public String deserialize(InputStream initialStream) throws IOException
{
System.out.println("deserialzier called");
StringBuilder sb = new StringBuilder();
try (BufferedReader rdr = new BufferedReader(new InputStreamReader(initialStream)))
{
for (int c; (c = rdr.read()) != -1;)
{
sb.append((char) c);
}
}
return sb.toString();
}
@Override
public void serialize(String msg, OutputStream os) throws IOException
{
System.out.println(msg + "---serialize---" + Thread.currentThread().getName() + "");
os.write(msg.getBytes());
}
}
My server at port 9999 code
try
{
clientSocket = echoServer.accept();
System.out.println("client connection established..");
is = new DataInputStream(clientSocket.getInputStream());
os = new PrintStream(clientSocket.getOutputStream());
String tempString = "kksingh";
byte[] tempStringByte = tempString.getBytes();
byte[] temp = new byte[tempString.getBytes().length];
while (true)
{
is.read(temp);
System.out.println(new String(temp) + "--received msg is--- " + LocalDateTime.now());
System.out.println(LocalDateTime.now() + "sending value");
os.write(tempStringByte);
break;
}
} catch (IOException e)
{
System.out.println(e);
}
}
My log file for tcp client
2017-06-04 23:10:14.771 INFO 15568 --- [ main] o.s.i.endpoint.EventDrivenConsumer : started org.springframework.integration.endpoint.EventDrivenConsumer@1f12e153
kksingh---serialize---main
pool-1-thread-1---deserialize----
pool-1-thread-1---deserialize----
pool-1-thread-1---deserialize----
pool-1-thread-1---deserialize----
2017-06-04 23:10:14.812 ERROR 15568 --- [pool-1-thread-1] o.s.i.ip.tcp.TcpOutboundGateway : Cannot correlate response - no pending reply for localhost:9999:57622:bc98ee29-8957-47bd-bd8a-f734c3ec3f9d
2017-06-04T23:10:14.809output
2017-06-04 23:10:14.821 INFO 15568 --- [ main] o.s.c.support.DefaultLifecycleProcessor : Stopping beans in phase 0
My log file for server side
client connection established..
kksingh--received msg is--- 2017-06-04T23:10:14.899
2017-06-04T23:10:14.899sending value
When I removed LocalDateTime.now() from the server and the TCP client, I am able to get the response as outputkksingh:
o.s.i.endpoint.EventDrivenConsumer : Adding {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel
2017-06-05 12:46:32.494 INFO 29076 --- [ main] o.s.i.channel.PublishSubscribeChannel : Channel 'application.errorChannel' has 1 subscriber(s).
2017-06-05 12:46:32.495 INFO 29076 --- [ main] o.s.i.endpoint.EventDrivenConsumer : started _org.springframework.integration.errorLogger
2017-06-05 12:46:32.746 INFO 29076 --- [ main] s.b.c.e.t.TomcatEmbeddedServletContainer : Tomcat started on port(s): 8080 (http)
2017-06-05 12:46:32.753 INFO 29076 --- [ main] o.s.i.samples.tcpclientserver.Test : Started Test in 2.422 seconds (JVM running for 2.716)
2017-06-05 12:46:32.761 INFO 29076 --- [ main] o.s.i.endpoint.EventDrivenConsumer : Adding {bridge:null} as a subscriber to the 'replyChannel' channel
2017-06-05 12:46:32.762 INFO 29076 --- [ main] o.s.integration.channel.DirectChannel : Channel 'application.replyChannel' has 1 subscriber(s).
2017-06-05 12:46:32.763 INFO 29076 --- [ main] o.s.i.endpoint.EventDrivenConsumer : started org.springframework.integration.endpoint.EventDrivenConsumer@1f12e153
kksingh---serialize---main
pool-1-thread-1---deserialize----kksingh
outputkksingh
2017-06-05 12:46:32.837 INFO 29076 --- [ main] o.s.c.support.DefaultLifecycleProcessor : Stopping beans in phase 0
2017-06-05 12:46:32.839 INFO 29076 --- [ main] o.s.i.endpoint.EventDrivenConsumer : Removing {bridge:null} as a subscriber to the 'replyChannel' channel
2017-06-05 12:46:32.839 INFO 29076 --- [
Your deserializer is deserializing multiple packets...
pool-1-thread-1---deserialize----
pool-1-thread-1---deserialize----
pool-1-thread-1---deserialize----
pool-1-thread-1---deserialize----
Which produces 4 reply messages; the gateway can only handle one reply, which is why you see that ERROR message.
Your deserializer needs to be smarter than just capturing the "available" bytes. You need something in the message to indicate the end of the data (or close the socket to indicate the end).
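Not your actual protocol, but as an illustration, a sketch of a terminator-based approach, assuming a newline can mark the end of each message (the server would have to append it as well):
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.springframework.core.serializer.Deserializer;
import org.springframework.core.serializer.Serializer;

// Hypothetical variant: '\n' marks the end of each message, so a single read
// yields exactly one reply instead of several partial ones.
public class NewlineTerminatedSerializer implements Serializer<String>, Deserializer<String> {

    @Override
    public String deserialize(InputStream inputStream) throws IOException {
        StringBuilder sb = new StringBuilder();
        int c;
        while ((c = inputStream.read()) != -1) {
            if (c == '\n') {
                return sb.toString();     // end of one logical message
            }
            sb.append((char) c);
        }
        return sb.toString();             // socket closed: return whatever was read
    }

    @Override
    public void serialize(String msg, OutputStream outputStream) throws IOException {
        outputStream.write(msg.getBytes());
        outputStream.write('\n');         // append the terminator the peer expects
    }
}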
