How to use Retry Template (backOffPolicy) with AfterRollbackProcessor in Spring Kafka 2.2.9.RELEASE - spring-boot

I am using Spring Boot 2.1.9 with Kafka and MySQL, and I have also implemented a chained transaction manager.
I want to set a backOffPolicy so that the retry happens after a certain interval. This is possible in newer Spring Kafka versions, but due to some other dependencies I am not able to upgrade Spring Boot.
As of now I am using an AfterRollbackProcessor to handle failed messages; now I want to implement a backOffPolicy with the AfterRollbackProcessor using Spring Kafka 2.2.9.RELEASE. Is there any way to implement it?
Here is the receiver config file:
@Configuration
@EnableKafka
public class KafkaReceiverConfig {
// Kafka Server Configuration
@Value("${kafka.servers}")
private String kafkaServers;
// Group Identifier
@Value("${kafka.groupId}")
private String groupId;
// Kafka Max Retry Attempts
@Value("${kafka.retry.maxAttempts:5}")
private Integer retryMaxAttempts;
// Kafka Max Retry Interval
@Value("${kafka.retry.interval:180000}")
private Long retryInterval;
// Kafka Concurrency
@Value("${kafka.concurrency:10}")
private Integer concurrency;
// Kafka Poll Timeout
@Value("${kafka.poll.timeout:300}")
private Integer pollTimeout;
// Kafka Consumer Offset
@Value("${kafka.consumer.auto-offset-reset:earliest}")
private String offset = "earliest";
@Value("${kafka.max.records:100}")
private Integer maxPollRecords;
@Value("${kafka.max.poll.interval.time:500000}")
private Integer maxPollIntervalMs;
@Value("${kafka.max.session.timeout:60000}")
private Integer sessionTimoutMs;
// Logger
private static final Logger log = LoggerFactory.getLogger(KafkaReceiverConfig.class);
/**
* String Kafka Listener Container Factory
*
* @return @see {@link KafkaListenerContainerFactory}
*/
@Bean
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory(
ChainedKafkaTransactionManager<String, String> chainedTM, MessageProducer messageProducer) {
ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
factory.setConsumerFactory(consumerFactory());
factory.setConcurrency(concurrency);
factory.getContainerProperties().setPollTimeout(pollTimeout);
factory.getContainerProperties().setAckMode(AckMode.RECORD);
factory.getContainerProperties().setSyncCommits(true);
// factory.setRetryTemplate(retryTemplate());
factory.getContainerProperties().setAckOnError(false);
factory.getContainerProperties().setTransactionManager(chainedTM);
// factory.setStatefulRetry(true);
AfterRollbackProcessor<String, String> afterRollbackProcessor = new DefaultAfterRollbackProcessor<>(
(record, exception) -> {
log.warn("failed to process kafka message (retries are exausted). topic name:" + record.topic()
+ " value:" + record.value());
messageProducer.saveFailedMessage(record, exception);
}, retryMaxAttempts);
factory.setAfterRollbackProcessor(afterRollbackProcessor);
log.debug("Kafka Receiver Config kafkaListenerContainerFactory created");
return factory;
}
/**
* String Consumer Factory
*
* @return @see {@link ConsumerFactory}
*/
@Bean
public ConsumerFactory<String, String> consumerFactory() {
log.debug("Kafka Receiver Config consumerFactory created");
return new DefaultKafkaConsumerFactory<>(consumerConfigs());
}
/**
* Consumer Configurations
*
* @return @see {@link Map}
*/
@Bean
public Map<String, Object> consumerConfigs() {
Map<String, Object> props = new ConcurrentHashMap<>();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalMs);
props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimoutMs);
props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, offset);
props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
log.debug("Kafka Receiver Config consumerConfigs created");
return props;
}
}

You can use listener retry but it MUST be stateful (you have that commented out). Otherwise, the retries will be performed within the transaction which is generally not what you want.
With stateful retry, the template throws the exception after it backs off; then the after rollback processor will perform a re-seek so the record is reprocessed.
As you say, in 2.3 we added a BackOff to the after rollback processor to make it easier to configure everything all in one place.
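For 2.2.x that could look something like the sketch below, reusing the retryInterval and retryMaxAttempts fields from the config above. This is a rough sketch, not a tested drop-in; the retry policy's max attempts and the rollback processor's max failures should be kept in line with each other.
RetryTemplate retryTemplate = new RetryTemplate();
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(retryInterval); // wait e.g. 180000 ms between attempts
retryTemplate.setBackOffPolicy(backOffPolicy);
retryTemplate.setRetryPolicy(new SimpleRetryPolicy(retryMaxAttempts));
// stateful retry: the exception is rethrown after each back-off, the transaction rolls back,
// and the after-rollback processor re-seeks so the record is redelivered
factory.setRetryTemplate(retryTemplate);
factory.setStatefulRetry(true);
// the rollback processor recovers (here: saves) the record once the attempts are exhausted
factory.setAfterRollbackProcessor(new DefaultAfterRollbackProcessor<>(
(record, exception) -> messageProducer.saveFailedMessage(record, exception),
retryMaxAttempts));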

Related

@KafkaListener get all messages from particular kafka topic

I have a @KafkaListener method that should get all messages in a topic, but I only get one message each time the @Scheduled method runs. How can I get all messages from the topic at once?
Here's my class:
@Slf4j
@Service
public class KafkaConsumerServiceImpl implements KafkaConsumerService {
@Autowired
private SimpMessagingTemplate webSocket;
@Autowired
private KafkaListenerEndpointRegistry registry;
@Autowired
private BrokerProducerService brokerProducerService;
@Autowired
private GlobalConfig globalConfig;
@Override
@KafkaListener(id = "snapshotOfOutagesId", topics = Constants.KAFKA_TOPIC, groupId = "snapshotOfOutages", autoStartup = "false")
public void consumeToSnapshot(ConsumerRecord<String, OutageDTO> cr, @Payload String content) {
log.info("Received content from Kafka notification to notification-snapshot topic: {}", content);
MessageListenerContainer listenerContainer = registry.getListenerContainer("snapshotOfOutagesId");
JSONObject jsonObject= new JSONObject(content);
Map<String, Object> outageMap = jsonToMap(jsonObject);
brokerProducerService.sendMessage(globalConfig.getTopicProperties().getSnapshotTopicName(),
outageMap.get("outageId").toString(), toJson(outageMap));
listenerContainer.stop();
}
@Scheduled(initialDelayString = "${scheduler.kafka.snapshot.monitoring}",fixedRateString = "${scheduler.kafka.snapshot.monitoring}")
private void consumeWithScheduler() {
MessageListenerContainer listenerContainer = registry.getListenerContainer("snapshotOfOutagesId");
if (listenerContainer != null){
listenerContainer.start();
}
}
And here's my Kafka properties in application.yml:
kafka:
streams:
common:
configs:
"[bootstrap.servers]": 192.168.99.100:9092
"[client.id]": event
"[producer.id]": event-producer
"[max.poll.interval.ms]": 300000
"[group.max.session.timeout.ms]": 300000
"[session.timeout.ms]": 200000
"[auto.commit.interval.ms]": 1000
"[auto.offset.reset]": latest
"[group.id]": event-consumer-group
"[max.poll.records]": 1
And also my KafkaConfiguration class:
@Bean
public Map<String, Object> consumerConfigs() {
Map<String, Object> props = new HashMap<>(globalConfig.getBrokerProperties().getConfigs());
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
return props;
}
@Bean
public ConsumerFactory<String, String> consumerFactory() {
return new DefaultKafkaConsumerFactory<>(consumerConfigs(), new StringDeserializer(), new StringDeserializer());
}
@Bean
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
factory.setConsumerFactory(consumerFactory());
return factory;
}
What you're currently doing is:
Create a listener but don't start it yet (autoStartup = false)
When the scheduled job kicks in, start the container (will start consuming the first message from the topic)
When the first message is consumed, you stop the container (resulting in no messages being consumed anymore)
So indeed the behavior you are describing is not a surprise.
@KafkaListener doesn't need a scheduled task to start consuming messages. I think you can remove autoStartup = false and remove the scheduled job, after which the listener will consume all messages on the topic one by one, and wait for new ones when they appear on the topic.
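For example, a stripped-down sketch of the listener (keeping your bean names and the jsonToMap/toJson helpers from above, and assuming the value is consumed as a String as in your consumer factory; untested) could be:
@Slf4j
@Service
public class KafkaConsumerServiceImpl implements KafkaConsumerService {
@Autowired
private BrokerProducerService brokerProducerService;
@Autowired
private GlobalConfig globalConfig;
// no autoStartup = "false" and no @Scheduled starter: the container starts with the
// application and keeps polling, so every record on the topic gets processed
@KafkaListener(id = "snapshotOfOutagesId", topics = Constants.KAFKA_TOPIC, groupId = "snapshotOfOutages")
public void consumeToSnapshot(ConsumerRecord<String, String> cr) {
String content = cr.value();
log.info("Received content from Kafka notification to notification-snapshot topic: {}", content);
Map<String, Object> outageMap = jsonToMap(new JSONObject(content));
brokerProducerService.sendMessage(globalConfig.getTopicProperties().getSnapshotTopicName(),
outageMap.get("outageId").toString(), toJson(outageMap));
}
}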
Also, some other things I noticed:
The properties you have are for Kafka Streams; for regular Spring Kafka you need the properties like so:
spring:
kafka:
bootstrap-servers: localhost:9092
consumer:
auto-offset-reset: earliest
...etc
Also: why use @Payload String content instead of simply cr.value(), which already gives you the same String?

Facing 'Unexpected error in AddOffsetsToTxnResponse' issue in spring kafka

I am using Spring Boot 2.2.7.RELEASE and Spring Kafka 2.3.8 (and also the Kafka chained transaction manager).
Confluent Kafka is the broker.
I am facing some issues while sending a message to Kafka. Here are the logs:
2020-07-09 08:28:13.139 ERROR [xxxxxx-component-workflow-handler,,,] 9 --- [_response-4-C-1] essageListenerContainer$ListenerConsumer : Send offsets to transaction failed
org.apache.kafka.common.KafkaException: Unexpected error in AddOffsetsToTxnResponse: The producer attempted to use a producer id which is not currently assigned to its transactional id.
at org.apache.kafka.clients.producer.internals.TransactionManager$AddOffsetsToTxnHandler.handleResponse(TransactionManager.java:1406)
at org.apache.kafka.clients.producer.internals.TransactionManager$TxnRequestHandler.onComplete(TransactionManager.java:1069)
at org.apache.kafka.clients.ClientResponse.onComplete(ClientResponse.java:109)
at org.apache.kafka.clients.NetworkClient.completeResponses(NetworkClient.java:561)
at org.apache.kafka.clients.NetworkClient.poll(NetworkClient.java:553)
at org.apache.kafka.clients.producer.internals.Sender.maybeSendAndPollTransactionalRequest(Sender.java:425)
at org.apache.kafka.clients.producer.internals.Sender.runOnce(Sender.java:311)
at org.apache.kafka.clients.producer.internals.Sender.run(Sender.java:244)
at java.base/java.lang.Thread.run(Thread.java:832)
2020-07-09 08:28:13.153 ERROR [xxxxxx-component-workflow-handler,,,] 9 --- [_response-4-C-1] o.s.k.core.DefaultKafkaProducerFactory : commitTransaction failed: CloseSafeProducer [delegate=org.apache.kafka.clients.producer.KafkaProducer@3d0f1d94, txId=xxxxxx-dagusa-Process-Handler-et3YEAYB1R1F6h-complete_fulfillment_item_response.complete_fulfillment_item_response.4]
org.apache.kafka.common.KafkaException: Cannot execute transactional method because we are in an error state
at org.apache.kafka.clients.producer.internals.TransactionManager.maybeFailWithError(TransactionManager.java:924)
at org.apache.kafka.clients.producer.internals.TransactionManager.lambda$beginCommit$2(TransactionManager.java:296)
at org.apache.kafka.clients.producer.internals.TransactionManager.handleCachedTransactionRequestResult(TransactionManager.java:1008)
at org.apache.kafka.clients.producer.internals.TransactionManager.beginCommit(TransactionManager.java:295)
at org.apache.kafka.clients.producer.KafkaProducer.commitTransaction(KafkaProducer.java:704)
at org.springframework.kafka.core.DefaultKafkaProducerFactory$CloseSafeProducer.commitTransaction(DefaultKafkaProducerFactory.java:691)
at brave.kafka.clients.TracingProducer.commitTransaction(TracingProducer.java:72)
at org.springframework.kafka.core.KafkaResourceHolder.commit(KafkaResourceHolder.java:58)
at org.springframework.kafka.transaction.KafkaTransactionManager.doCommit(KafkaTransactionManager.java:200)
at org.springframework.transaction.support.AbstractPlatformTransactionManager.processCommit(AbstractPlatformTransactionManager.java:743)
at org.springframework.transaction.support.AbstractPlatformTransactionManager.commit(AbstractPlatformTransactionManager.java:711)
at org.springframework.data.transaction.MultiTransactionStatus.commit(MultiTransactionStatus.java:74)
at org.springframework.data.transaction.ChainedTransactionManager.commit(ChainedTransactionManager.java:150)
at org.springframework.transaction.support.TransactionTemplate.execute(TransactionTemplate.java:152)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeRecordListenerInTx(KafkaMessageListenerContainer.java:1569)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeRecordListener(KafkaMessageListenerContainer.java:1546)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeListener(KafkaMessageListenerContainer.java:1288)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.pollAndInvoke(KafkaMessageListenerContainer.java:1035)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.run(KafkaMessageListenerContainer.java:949)
at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
at java.base/java.lang.Thread.run(Thread.java:832)
Caused by: org.apache.kafka.common.KafkaException: Unexpected error in AddOffsetsToTxnResponse: The producer attempted to use a producer id which is not currently assigned to its transactional id.
at org.apache.kafka.clients.producer.internals.TransactionManager$AddOffsetsToTxnHandler.handleResponse(TransactionManager.java:1406)
at org.apache.kafka.clients.producer.internals.TransactionManager$TxnRequestHandler.onComplete(TransactionManager.java:1069)
at org.apache.kafka.clients.ClientResponse.onComplete(ClientResponse.java:109)
at org.apache.kafka.clients.NetworkClient.completeResponses(NetworkClient.java:561)
at org.apache.kafka.clients.NetworkClient.poll(NetworkClient.java:553)
at org.apache.kafka.clients.producer.internals.Sender.maybeSendAndPollTransactionalRequest(Sender.java:425)
at org.apache.kafka.clients.producer.internals.Sender.runOnce(Sender.java:311)
at org.apache.kafka.clients.producer.internals.Sender.run(Sender.java:244)
... 1 common frames omitted
2020-07-09 08:28:13.157 WARN [xxxxxx-component-workflow-handler,,,] 9 --- [_response-4-C-1] o.s.k.core.DefaultKafkaProducerFactory : Error during some operation; producer removed from cache: CloseSafeProducer [delegate=org.apache.kafka.clients.producer.KafkaProducer@3d0f1d94, txId=xxxxxx-dagusa-Process-Handler-et3YEAYB1R1F6h-complete_fulfillment_item_response.complete_fulfillment_item_response.4]
2020-07-09 08:28:13.167 ERROR [xxxxxx-component-workflow-handler,,,] 9 --- [_response-4-C-1] essageListenerContainer$ListenerConsumer : Transaction rolled back
org.springframework.transaction.HeuristicCompletionException: Heuristic completion: outcome state is mixed; nested exception is org.apache.kafka.common.KafkaException: Cannot execute transactional method because we are in an error state
at org.springframework.data.transaction.ChainedTransactionManager.commit(ChainedTransactionManager.java:177)
at org.springframework.transaction.support.TransactionTemplate.execute(TransactionTemplate.java:152)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeRecordListenerInTx(KafkaMessageListenerContainer.java:1569)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeRecordListener(KafkaMessageListenerContainer.java:1546)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeListener(KafkaMessageListenerContainer.java:1288)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.pollAndInvoke(KafkaMessageListenerContainer.java:1035)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.run(KafkaMessageListenerContainer.java:949)
at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
at java.base/java.lang.Thread.run(Thread.java:832)
Caused by: org.apache.kafka.common.KafkaException: Cannot execute transactional method because we are in an error state
at org.apache.kafka.clients.producer.internals.TransactionManager.maybeFailWithError(TransactionManager.java:924)
at org.apache.kafka.clients.producer.internals.TransactionManager.lambda$beginCommit$2(TransactionManager.java:296)
at org.apache.kafka.clients.producer.internals.TransactionManager.handleCachedTransactionRequestResult(TransactionManager.java:1008)
at org.apache.kafka.clients.producer.internals.TransactionManager.beginCommit(TransactionManager.java:295)
at org.apache.kafka.clients.producer.KafkaProducer.commitTransaction(KafkaProducer.java:704)
at org.springframework.kafka.core.DefaultKafkaProducerFactory$CloseSafeProducer.commitTransaction(DefaultKafkaProducerFactory.java:691)
at brave.kafka.clients.TracingProducer.commitTransaction(TracingProducer.java:72)
at org.springframework.kafka.core.KafkaResourceHolder.commit(KafkaResourceHolder.java:58)
at org.springframework.kafka.transaction.KafkaTransactionManager.doCommit(KafkaTransactionManager.java:200)
at org.springframework.transaction.support.AbstractPlatformTransactionManager.processCommit(AbstractPlatformTransactionManager.java:743)
at org.springframework.transaction.support.AbstractPlatformTransactionManager.commit(AbstractPlatformTransactionManager.java:711)
at org.springframework.data.transaction.MultiTransactionStatus.commit(MultiTransactionStatus.java:74)
at org.springframework.data.transaction.ChainedTransactionManager.commit(ChainedTransactionManager.java:150)
... 9 common frames omitted
Caused by: org.apache.kafka.common.KafkaException: Unexpected error in AddOffsetsToTxnResponse: The producer attempted to use a producer id which is not currently assigned to its transactional id.
at org.apache.kafka.clients.producer.internals.TransactionManager$AddOffsetsToTxnHandler.handleResponse(TransactionManager.java:1406)
at org.apache.kafka.clients.producer.internals.TransactionManager$TxnRequestHandler.onComplete(TransactionManager.java:1069)
at org.apache.kafka.clients.ClientResponse.onComplete(ClientResponse.java:109)
at org.apache.kafka.clients.NetworkClient.completeResponses(NetworkClient.java:561)
at org.apache.kafka.clients.NetworkClient.poll(NetworkClient.java:553)
at org.apache.kafka.clients.producer.internals.Sender.maybeSendAndPollTransactionalRequest(Sender.java:425)
at org.apache.kafka.clients.producer.internals.Sender.runOnce(Sender.java:311)
at org.apache.kafka.clients.producer.internals.Sender.run(Sender.java:244)
... 1 common frames omitted
Here is my config:
kafka sender config:
@Configuration
@EnableKafka
public class KafkaSenderConfig{
@Value("${kafka.servers}")
private String kafkaServers;
@Value("${application.name}")
private String applicationName;
private static final Logger log = LoggerFactory.getLogger(KafkaSenderConfig.class);
#Bean(value = "stringKafkaTransactionManager")
public KafkaTransactionManager<String, String> kafkaStringTransactionManager() {
KafkaTransactionManager<String, String> ktm = new KafkaTransactionManager<String, String>(stringProducerFactory());
ktm.setNestedTransactionAllowed(true);
ktm.setTransactionSynchronization(AbstractPlatformTransactionManager.SYNCHRONIZATION_ALWAYS);
return ktm;
}
#Bean(value = "stringProducerFactory")
#Primary
public ProducerFactory<String, String> stringProducerFactory() {
log.debug("Kafka Servers: " + kafkaServers);
Map<String, Object> config = getConfigs();
//config.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
DefaultKafkaProducerFactory<String, String> defaultKafkaProducerFactory = new DefaultKafkaProducerFactory<>(config);
String randomString=applicationName.replaceAll("\\s+","-").concat("-").concat(StringUtil.getRandomString(14)).concat("-");
defaultKafkaProducerFactory.setTransactionIdPrefix(randomString);
return defaultKafkaProducerFactory;
}
/**
* Create a new Kafka Template for String based Messages
*
* @return
*/
#Bean(value = "stringKafkaTemplate")
#Primary
public KafkaTemplate<String, String> stringKafkaTemplate() {
log.debug("Creating the Kafka Template for String Producer Factory");
return new KafkaTemplate<>(stringProducerFactory(),true);
}
#Bean(name = "chainedStringKafkaTransactionManager")
#Primary
public ChainedKafkaTransactionManager<String, String> chainedTransactionManager(JpaTransactionManager jpaTransactionManager, DataSourceTransactionManager dsTransactionManager) {
return new ChainedKafkaTransactionManager<>(kafkaStringTransactionManager(), jpaTransactionManager, dsTransactionManager);
}
private Map<String, Object> getConfigs() {
Map<String, Object> config = new ConcurrentHashMap<>();
config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
config.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
//Try to send msgs out in 100ms even if the batch size is not met
config.put(ProducerConfig.LINGER_MS_CONFIG, 100);
config.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
config.put(ProducerConfig.ACKS_CONFIG, "all");
return config;
}
}
kafka receiver config:
@Configuration
@EnableKafka
public class KafkaReceiverConfig {
// Kafka Server Configuration
@Value("${kafka.servers}")
private String kafkaServers;
// Group Identifier
@Value("${kafka.groupId}")
private String groupId;
// Kafka Max Retry Attempts
@Value("${kafka.retry.maxAttempts:3}")
private Integer retryMaxAttempts;
// Kafka Max Retry Interval
@Value("${kafka.retry.interval:30000}")
private Long retryInterval;
// Kafka Concurrency
@Value("${kafka.concurrency:10}")
private Integer concurrency;
// Kafka Poll Timeout
@Value("${kafka.poll.timeout:300}")
private Integer pollTimeout;
// Kafka Consumer Offset
@Value("${kafka.consumer.auto-offset-reset:earliest}")
private String offset = "earliest";
@Value("${kafka.max.records:100}")
private Integer maxPollRecords;
@Value("${kafka.max.poll.interval.time:500000}")
private Integer maxPollIntervalMs;
@Value("${kafka.max.session.timeout:60000}")
private Integer sessionTimoutMs;
// String Kafka Template to send Messages
@Autowired
@Qualifier("stringKafkaTemplate")
private KafkaTemplate<String, String> stringKafkaTemplate;
// Logger
private static final Logger log = LoggerFactory.getLogger(KafkaReceiverConfig.class);
@Bean
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory(
ChainedKafkaTransactionManager<String, String> chainedTM, MessageProducer messageProducer) {
ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
factory.setConsumerFactory(consumerFactory());
factory.setConcurrency(concurrency);
factory.getContainerProperties().setPollTimeout(pollTimeout);
factory.getContainerProperties().setAckMode(AckMode.RECORD);
factory.getContainerProperties().setSyncCommits(true);
factory.getContainerProperties().setAckOnError(false);
factory.getContainerProperties().setTransactionManager(chainedTM);
DefaultAfterRollbackProcessor<String, String> afterRollbackProcessor = new DefaultAfterRollbackProcessor<>(
(record, exception) -> {
log.warn("failed to process kafka message (retries are exausted). topic name:" + record.topic()
+ " value:" + record.value());
messageProducer.saveFailedMessage(record, exception);
}, new FixedBackOff(retryInterval, retryMaxAttempts));
afterRollbackProcessor.setCommitRecovered(true);
afterRollbackProcessor.setKafkaTemplate(stringKafkaTemplate);
factory.setAfterRollbackProcessor(afterRollbackProcessor);
log.debug("Kafka Receiver Config kafkaListenerContainerFactory created");
return factory;
}
@Bean
public ConsumerFactory<String, String> consumerFactory() {
log.debug("Kafka Receiver Config consumerFactory created");
return new DefaultKafkaConsumerFactory<>(consumerConfigs());
}
@Bean
public Map<String, Object> consumerConfigs() {
Map<String, Object> props = new ConcurrentHashMap<>();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalMs);
props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimoutMs);
props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, offset);
props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
log.debug("Kafka Receiver Config consumerConfigs created");
return props;
}
}
Here is my code to send a message:
@Transactional(readOnly = false)
public void initiateOrderUpdate(String jsonString){
// some logic here
stringKafkaTemplate.send("some_tpic", jsonString);
// some logic here
}
Previously I was using Spring Boot 2.1.9 and Spring Kafka 2.2.9 and everything was working fine, but after upgrading to the versions above I am facing this issue.
Is there any issue with the configuration?

How to commit offset in spring Kafka if the message failed and processed by AfterRollbackProcessor

I am using Spring Boot 2.1.9 with Spring Kafka 2.2.9.
If a message fails the number of times defined in the AfterRollbackProcessor, the consumer stops polling the record. But if the consumer is restarted, it re-polls the same message and processes it again.
But I don't want the message to be re-polled again. What is the best way to stop that?
Here is my config:
@Configuration
@EnableKafka
public class KafkaReceiverConfig {
// Kafka Server Configuration
@Value("${kafka.servers}")
private String kafkaServers;
// Group Identifier
@Value("${kafka.groupId}")
private String groupId;
// Kafka Max Retry Attempts
@Value("${kafka.retry.maxAttempts:5}")
private Integer retryMaxAttempts;
// Kafka Max Retry Interval
@Value("${kafka.retry.interval:180000}")
private Long retryInterval;
// Kafka Concurrency
@Value("${kafka.concurrency:10}")
private Integer concurrency;
// Kafka Poll Timeout
@Value("${kafka.poll.timeout:300}")
private Integer pollTimeout;
// Kafka Consumer Offset
@Value("${kafka.consumer.auto-offset-reset:earliest}")
private String offset = "earliest";
@Value("${kafka.max.records:100}")
private Integer maxPollRecords;
@Value("${kafka.max.poll.interval.time:500000}")
private Integer maxPollIntervalMs;
@Value("${kafka.max.session.timeout:60000}")
private Integer sessionTimoutMs;
// Logger
private static final Logger log = LoggerFactory.getLogger(KafkaReceiverConfig.class);
@Bean
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory(
ChainedKafkaTransactionManager<String, String> chainedTM, MessageProducer messageProducer) {
ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
factory.setConsumerFactory(consumerFactory());
factory.setConcurrency(concurrency);
factory.getContainerProperties().setPollTimeout(pollTimeout);
factory.getContainerProperties().setAckMode(AckMode.RECORD);
factory.getContainerProperties().setSyncCommits(true);
factory.getContainerProperties().setAckOnError(false);
factory.getContainerProperties().setTransactionManager(chainedTM);
AfterRollbackProcessor<String, String> afterRollbackProcessor = new DefaultAfterRollbackProcessor<>(
(record, exception) -> {
log.warn("failed to process kafka message (retries are exausted). topic name:" + record.topic()
+ " value:" + record.value());
messageProducer.saveFailedMessage(record, exception);
}, retryMaxAttempts);
factory.setAfterRollbackProcessor(afterRollbackProcessor);
log.debug("Kafka Receiver Config kafkaListenerContainerFactory created");
return factory;
}
@Bean
public ConsumerFactory<String, String> consumerFactory() {
log.debug("Kafka Receiver Config consumerFactory created");
return new DefaultKafkaConsumerFactory<>(consumerConfigs());
}
@Bean
public Map<String, Object> consumerConfigs() {
Map<String, Object> props = new ConcurrentHashMap<>();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalMs);
props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimoutMs);
props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, offset);
props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
log.debug("Kafka Receiver Config consumerConfigs created");
return props;
}
}
How can I achieve this?
Set the commitRecovered property to true and inject a KafkaTemplate configured with the same producer factory as the transaction manager.
/**
* {@inheritDoc}
* Set to true and the container will run the
* {@link #process(List, Consumer, Exception, boolean)} method in a transaction and,
* if a record is skipped and recovered, we will send its offset to the transaction.
* Requires a {@link KafkaTemplate}.
* @param commitRecovered true to process in a transaction.
* @since 2.2.5
* @see #isProcessInTransaction()
* @see #process(List, Consumer, Exception, boolean)
* @see #setKafkaTemplate(KafkaTemplate)
*/
@Override
public void setCommitRecovered(boolean commitRecovered) { // NOSONAR enhanced javadoc
super.setCommitRecovered(commitRecovered);
}
EDIT
Here's the logic in process...
if (SeekUtils.doSeeks(((List) records), consumer, exception, recoverable,
getSkipPredicate((List) records, exception), this.logger)
&& isCommitRecovered() && this.kafkaTemplate != null && this.kafkaTemplate.isTransactional()) {
// if we get here it means retries are exhausted and we've skipped
ConsumerRecord<K, V> skipped = records.get(0);
this.kafkaTemplate.sendOffsetsToTransaction(
Collections.singletonMap(new TopicPartition(skipped.topic(), skipped.partition()),
new OffsetAndMetadata(skipped.offset() + 1)));
}
EDIT2
In 2.2.x, the property is
/**
* Set to true to run the {@link #process(List, Consumer, Exception, boolean)}
* method in a transaction. Requires a {@link KafkaTemplate}.
* @param processInTransaction true to process in a transaction.
* @since 2.2.5
* @see #process(List, Consumer, Exception, boolean)
* @see #setKafkaTemplate(KafkaTemplate)
*/
public void setProcessInTransaction(boolean processInTransaction) {
this.processInTransaction = processInTransaction;
}
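So for 2.2.9, a rough sketch of the wiring in the factory above could look like the following. It assumes a KafkaTemplate built from the same producer factory as the KafkaTransactionManager (here called stringKafkaTemplate) is injected into this config class; it is a sketch, not a tested drop-in.
DefaultAfterRollbackProcessor<String, String> afterRollbackProcessor = new DefaultAfterRollbackProcessor<>(
(record, exception) -> {
log.warn("failed to process kafka message (retries are exhausted). topic name:" + record.topic()
+ " value:" + record.value());
messageProducer.saveFailedMessage(record, exception);
}, retryMaxAttempts);
// 2.2.x: run process() in a transaction and send the recovered record's offset (offset + 1)
// to that transaction, so the record is not re-polled after a restart
afterRollbackProcessor.setProcessInTransaction(true);
afterRollbackProcessor.setKafkaTemplate(stringKafkaTemplate);
factory.setAfterRollbackProcessor(afterRollbackProcessor);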

auto-commit of offsets Failed & Retry also not working as expected

I am using Spring Boot 2.1.9 with Spring Kafka 2.2.9.
I am getting a warning in the log file which says the commit failed, and I am also using a SeekToCurrentErrorHandler to capture the error once retries are exhausted, but sometimes if the commit fails it keeps on iterating.
Here is my config class:
@Configuration
@EnableKafka
public class KafkaReceiverConfig {
// Kafka Server Configuration
@Value("${kafka.servers}")
private String kafkaServers;
// Group Identifier
@Value("${kafka.groupId}")
private String groupId;
// Kafka Max Retry Attempts
@Value("${kafka.retry.maxAttempts:5}")
private Integer retryMaxAttempts;
// Kafka Max Retry Interval
@Value("${kafka.retry.interval:180000}")
private Long retryInterval;
// Kafka Concurrency
@Value("${kafka.concurrency:10}")
private Integer concurrency;
// Kafka Poll Timeout
@Value("${kafka.poll.timeout:100}")
private Integer pollTimeout;
// Kafka Consumer Offset
@Value("${kafka.consumer.auto-offset-reset:earliest}")
private String offset = "earliest";
// Logger
private static final Logger log = LoggerFactory.getLogger(KafkaReceiverConfig.class);
/**
* Defines the Max Number of Retry Attempts
*
* @return Return the Retry Policy @see {@link RetryPolicy}
*/
@Bean
public RetryPolicy retryPolicy() {
SimpleRetryPolicy simpleRetryPolicy = new SimpleRetryPolicy();
simpleRetryPolicy.setMaxAttempts(retryMaxAttempts);
return simpleRetryPolicy;
}
/**
* Time before the next Retry can happen, the Time used is in Milliseconds
*
* @return Return the BackOff Policy @see {@link BackOffPolicy}
*/
@Bean
public BackOffPolicy backOffPolicy() {
FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
backOffPolicy.setBackOffPeriod(retryInterval);
return backOffPolicy;
}
/**
* Get Retry Template
*
* @return Return the Retry Template @see {@link RetryTemplate}
*/
@Bean
public RetryTemplate retryTemplate() {
RetryTemplate retryTemplate = new RetryTemplate();
retryTemplate.setRetryPolicy(retryPolicy());
retryTemplate.setBackOffPolicy(backOffPolicy());
return retryTemplate;
}
/**
* String Kafka Listener Container Factory
*
* @return @see {@link KafkaListenerContainerFactory}
*/
@Bean
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory(
ChainedKafkaTransactionManager<String, String> chainedTM, MessageProducer messageProducer) {
ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<String, String>();
factory.setConsumerFactory(consumerFactory());
factory.setConcurrency(concurrency);
factory.getContainerProperties().setPollTimeout(pollTimeout);
factory.getContainerProperties().setSyncCommits(true);
factory.setRetryTemplate(retryTemplate());
factory.getContainerProperties().setAckOnError(false);
factory.getContainerProperties().setTransactionManager(chainedTM);
factory.setStatefulRetry(true);
// NOTE: retryMaxAttempts should always +1 due to spring kafka bug
SeekToCurrentErrorHandler errorHandler = new SeekToCurrentErrorHandler((record, exception) -> {
log.warn("failed to process kafka message (retries are exausted). topic name:"+record.topic()+" value:"+record.value());
messageProducer.saveFailedMessage(record, exception);
}, retryMaxAttempts + 1);
factory.setErrorHandler(errorHandler);
log.debug("Kafka Receiver Config kafkaListenerContainerFactory created");
return factory;
}
/**
* String Consumer Factory
*
* @return @see {@link ConsumerFactory}
*/
@Bean
public ConsumerFactory<String, String> consumerFactory() {
log.debug("Kafka Receiver Config consumerFactory created");
return new DefaultKafkaConsumerFactory<>(consumerConfigs());
}
/**
* Consumer Configurations
*
* @return @see {@link Map}
*/
@Bean
public Map<String, Object> consumerConfigs() {
Map<String, Object> props = new ConcurrentHashMap<String, Object>();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
// Disable the Auto Commit if required for testing
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, offset);
props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
log.debug("Kafka Receiver Config consumerConfigs created");
return props;
}
}
Here is the log:
2019-10-30 15:48:05.907 WARN [xxxxx-component-workflow-starter,,,] 11 --- [nt_create-2-C-1] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer-4, groupId=fulfillment_create] Synchronous auto-commit of offsets {fulfillment_create-4=OffsetAndMetadata{offset=32, metadata=''}} failed: Commit cannot be completed since the group has already rebalanced and assigned the partitions to another member. This means that the time between subsequent calls to poll() was longer than the configured max.poll.interval.ms, which typically implies that the poll loop is spending too much time message processing. You can address this either by increasing the session timeout or by reducing the maximum size of batches returned in poll() with max.poll.records.
Is there any problem with my config file?
How do I set max poll and session timeout and so on? (Give me an example.)
How do I set up SeekToCurrentErrorHandler in Spring Kafka 2.2.9 so that it works well (I cannot upgrade Spring Kafka due to some other dependencies)?
You are taking too long to process the records returned by the poll().
You need to reduce max.poll.records (ConsumerConfig.MAX_POLL_RECORDS_CONFIG) and/or increase max.poll.interval.ms.
You can't perform a seek after this error - you have lost the partitions.
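For example, in your consumerConfigs() you could add something along these lines (the numbers are only illustrative; size them to how long one batch of records really takes to process):
props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 50); // fewer records per poll()
props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 600000); // allow up to 10 minutes between polls
props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 60000); // must stay within the broker's group.max.session.timeout.ms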

Custom MessageConverter with Spring JmsMessagingTemplate is not working as I expected

I'm trying to attach a custom message converter that implements org.springframework.jms.support.converter.MessageConverter, to a JmsMessagingTemplate.
I've read somewhere that we can attach the message converter to a MessagingMessageConverter by calling setPayloadConverter, and then attach that messaging message converter to the JmsMessagingTemplate via setJmsMessageConverter. After that, I call convertAndSend, but I notice that it doesn't convert the payload.
When I debugged the code, I noticed that setting the JMS message converter doesn't set the converter instance variable in the JmsMessagingTemplate. So when the convertAndSend method calls doConvert and tries to getConverter, it gets the default simple message converter and not my custom one.
My question is, can I use an implementation of org.springframework.jms.support.converter.MessageConverter with a JmsMessagingTemplate? Or do I need to use an implementation of org.springframework.messaging.converter.MessageConverter?
I'm using Spring Boot 1.4.1.RELEASE, and Spring 4.3.3.RELEASE. The code is below.
Configuration
@Configuration
@EnableJms
public class MessagingEncryptionPocConfig {
/**
* Listener ActiveMQ Connection Factory
*/
#Bean(name="listenerActiveMqConnectionFactory")
public ActiveMQConnectionFactory listenerActiveMqConnectionFactory() {
return new ActiveMQConnectionFactory("admin","admin","tcp://localhost:61616");
}
/**
* Producer ActiveMQ Connection Factory
*/
#Bean(name="producerActiveMqConnectionFactory")
public ActiveMQConnectionFactory producerActiveMqConnectionFactory() {
return new ActiveMQConnectionFactory("admin","admin","tcp://localhost:61616");
}
/**
* Caching Connection Factory
*/
@Bean
public CachingConnectionFactory cachingConnectionFactory(@Qualifier("producerActiveMqConnectionFactory") ActiveMQConnectionFactory activeMqConnectionFactory) {
return new CachingConnectionFactory(activeMqConnectionFactory);
}
/**
* JMS Listener Container Factory
*/
@Bean
public DefaultJmsListenerContainerFactory jmsListenerContainerFactory(@Qualifier("listenerActiveMqConnectionFactory") ActiveMQConnectionFactory connectionFactory, MessagingMessageConverter messageConverter) {
DefaultJmsListenerContainerFactory defaultJmsListenerContainerFactory = new DefaultJmsListenerContainerFactory();
defaultJmsListenerContainerFactory.setConnectionFactory(connectionFactory);
defaultJmsListenerContainerFactory.setMessageConverter(messageConverter);
return defaultJmsListenerContainerFactory;
}
/**
* Jms Queue Template
*/
#Bean(name="queueTemplate")
public JmsMessagingTemplate queueTemplate(CachingConnectionFactory cachingConnectionFactory, MessageConverter messagingMessageConverter) {
JmsMessagingTemplate queueTemplate = new JmsMessagingTemplate(cachingConnectionFactory);
queueTemplate.setJmsMessageConverter(messagingMessageConverter);
return queueTemplate;
}
@Bean
public MessageConverter encryptionDecryptionMessagingConverter(Jaxb2Marshaller jaxb2Marshaller) {
MessageConverter encryptionDecryptionMessagingConverter = new EncryptionDecryptionMessagingConverter(jaxb2Marshaller);
MessagingMessageConverter messageConverter = new MessagingMessageConverter();
messageConverter.setPayloadConverter(encryptionDecryptionMessagingConverter);
return messageConverter;
}
/**
* Jaxb marshaller
*/
#Bean(name="producerJaxb2Marshaller")
public Jaxb2Marshaller jaxb2Marshaller() {
Jaxb2Marshaller jaxb2Marshaller = new Jaxb2Marshaller();
jaxb2Marshaller.setPackagesToScan("com.schema");
return jaxb2Marshaller;
}
}
MessageProducer Class
@Component
public class MessageProducer {
private static final Logger LOG = LoggerFactory.getLogger(MessageProducer.class);
@Autowired
@Qualifier("queueTemplate")
private JmsMessagingTemplate queueTemplate;
public void publishMsg(Transaction trx, Map<String,Object> jmsHeaders, MessagePostProcessor postProcessor) {
LOG.info("Sending Message. Payload={} Headers={}",trx,jmsHeaders);
queueTemplate.convertAndSend("queue.source", trx, jmsHeaders, postProcessor);
}
}
Unit Test
@RunWith(SpringRunner.class)
@SpringBootTest
@ActiveProfiles("test")
public class WebsMessagingEncryptionPocApplicationTests {
@Autowired
private MessageProducer producer;
@Autowired
private MessageListener messageListener;
/**
* Ensure that a message is sent, and received.
*/
@Test
public void testProducer() throws Exception{
//ARRANGE
CountDownLatch latch = new CountDownLatch(1);
messageListener.setCountDownLatch(latch);
Transaction trx = new Transaction();
trx.setCustomerAccountID(new BigInteger("111111"));
Map<String,Object> jmsHeaders = new HashMap<String,Object>();
jmsHeaders.put("tid", "1234563423");
MessagePostProcessor encryptPostProcessor = new EncryptMessagePostProcessor();
//ACT
producer.publishMsg(trx, jmsHeaders, encryptPostProcessor);
latch.await();
//ASSERT - assertion done in the consumer
}
}
The converter field is used to convert your input params to a spring-messaging Message<?>.
The JMS converter is used later (in MessagingMessageCreator) to then create a JMS Message from the messaging Message<?>.
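So if the payload conversion should happen in convertAndSend itself, one option is to set a converter that implements org.springframework.messaging.converter.MessageConverter on the template. This is a rough, untested sketch; it assumes your encryption converter can be reworked to implement that interface, and payloadMessagingConverter is just an illustrative bean name:
@Bean(name="queueTemplate")
public JmsMessagingTemplate queueTemplate(CachingConnectionFactory cachingConnectionFactory,
org.springframework.messaging.converter.MessageConverter payloadMessagingConverter) {
JmsMessagingTemplate queueTemplate = new JmsMessagingTemplate(cachingConnectionFactory);
// this is the converter convertAndSend() consults to build the Message<?> from your payload;
// the JMS converter set via setJmsMessageConverter only maps that Message<?> to a javax.jms.Message
queueTemplate.setMessageConverter(payloadMessagingConverter);
return queueTemplate;
}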
