KafkaConsumer With Multiple Different Avro Producers And Transactions - spring-boot

I have a single Kafka consumer that consumes a string. Based on the string, we convert it to different Avro objects and publish them to different topics. We require EOS, and the issue we are getting is that the producer marked with @Primary works, however the one without @Primary fails with the error below. Is there any way to accommodate both?
KafkaConsumer
@Configuration
public class KafkaConsumerConfig {

    @Value("${kafka.server}")
    String server;

    @Value("${kafka.consumer.groupid}")
    String groupid;

    @Autowired
    Tracer tracer;

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> config = new HashMap<>();
        config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, server);
        config.put(ConsumerConfig.GROUP_ID_CONFIG, groupid);
        config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        config.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        config.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 120000);
        config.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 10000);
        //config.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 15000);
        return new TracingConsumerFactory<>(new DefaultKafkaConsumerFactory<>(config), tracer);
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory(
            KafkaAwareTransactionManager<Object, Object> transactionManager) {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setAutoStartup(false);
        factory.setConcurrency(2);
        factory.setBatchListener(true);
        factory.getContainerProperties().setAckMode(AckMode.BATCH);
        factory.getContainerProperties().setEosMode(EOSMode.ALPHA);
        factory.getContainerProperties().setTransactionManager(transactionManager);
        return factory;
    }
}
KafkaProducer 1
@Configuration
public class KafkaProducerConfig {

    @Value("${kafka.server}")
    String server;

    @Autowired
    public Tracer tracer;

    String tranId = "eventsanavro";

    @Bean(name = "transactionalProducerFactoryAvro")
    public ProducerFactory<String, TransactionAvroEntity> producerFactoryavro() {
        Map<String, Object> config = new HashMap<>();
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, server);
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, AvroSerializer.class.getName());
        config.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
        config.put(ProducerConfig.ACKS_CONFIG, "all");
        config.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, tranId);
        config.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
        config.put(ProducerConfig.LINGER_MS_CONFIG, "200");
        config.put(ProducerConfig.BATCH_SIZE_CONFIG, Integer.toString(256 * 1024));
        config.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 120000);
        config.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 60000);
        config.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 5);
        config.put(ProducerConfig.BUFFER_MEMORY_CONFIG, Integer.toString(32768 * 1024));
        return new TracingProducerFactory<>(new DefaultKafkaProducerFactory<>(config), tracer);
    }

    @Qualifier("transactionalProducerFactoryAvro")
    @Bean(name = "transactionalKafkaTemplateAvro")
    public KafkaTemplate<String, TransactionAvroEntity> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactoryavro());
    }

    @Qualifier("transactionalProducerFactoryAvro")
    @Bean(name = "transactionalKafkaTransactionManagerAvro")
    public KafkaAwareTransactionManager<?, ?> kafkaTransactionManager(
            ProducerFactory<String, TransactionAvroEntity> producerFactory) {
        return new KafkaTransactionManager<>(producerFactory);
    }
}
KafkaProducer 2
@Configuration
public class KafkaProducerNonAvroConfig {

    @Value("${kafka.server}")
    String server;

    @Autowired
    public Tracer tracer;

    String tranId = "eventsannonavro";

    @Primary
    @Bean(name = "transactionalProducerFactoryNonAvro")
    public ProducerFactory<String, String> producerFactoryNonAvro() {
        Map<String, Object> config = new HashMap<>();
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, server);
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        config.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
        config.put(ProducerConfig.ACKS_CONFIG, "all");
        config.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, tranId);
        config.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
        config.put(ProducerConfig.LINGER_MS_CONFIG, "200");
        config.put(ProducerConfig.BATCH_SIZE_CONFIG, Integer.toString(256 * 1024));
        config.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 120000);
        config.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 60000);
        config.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 5);
        config.put(ProducerConfig.BUFFER_MEMORY_CONFIG, Integer.toString(32768 * 1024));
        return new TracingProducerFactory<>(new DefaultKafkaProducerFactory<>(config), tracer);
    }

    @Primary
    @Qualifier("transactionalProducerFactoryNonAvro")
    @Bean(name = "transactionalKafkaTemplateNonAvro")
    public KafkaTemplate<String, String> kafkatemplate() {
        return new KafkaTemplate<>(producerFactoryNonAvro());
    }

    @Primary
    @Qualifier("transactionalProducerFactoryNonAvro")
    @Bean(name = "transactionalKafkaTransactionManagerNonAvro")
    public KafkaAwareTransactionManager<?, ?> kafkaTransactionManager(ProducerFactory<String, String> producerFactory) {
        return new KafkaTransactionManager<>(producerFactory);
    }
}
ProducerWrapper
@Service
public class KafkaTopicProducer {

    @Autowired
    private KafkaTemplate<String, TransactionAvroEntity> kafkaTemplate;

    @Autowired
    private KafkaTemplate<String, String> kafkaProducerNonAvrokafkaTemplate;

    public void topicProducerAvro(TransactionAvroEntity payload, String topic, Headers headers) {
        ProducerRecord<String, TransactionAvroEntity> producerRecord = new ProducerRecord<>(
                topic, null, UUID.randomUUID().toString(), payload, headers);
        kafkaTemplate.send(producerRecord);
    }

    public void kafkaAvroFlush() {
        kafkaTemplate.flush();
    }

    public void topicProducerNonAvro(String payload, String topic, Headers headers) {
        ProducerRecord<String, String> producerRecord = new ProducerRecord<>(
                topic, null, UUID.randomUUID().toString(), payload, headers);
        kafkaProducerNonAvrokafkaTemplate.send(producerRecord);
    }

    public void kafkaNonAvroFlush() {
        kafkaProducerNonAvrokafkaTemplate.flush();
    }
}
ERROR
Caused by: java.lang.IllegalStateException: No transaction is in process; possible solutions: run the template operation within the scope of a template.executeInTransaction() operation, start a transaction with @Transactional before invoking the template method, run in a transaction started by a listener container when consuming a record
Full Stack Trace
2022-05-03 09:35:11,358 INFO [nerMoz-0-C-1] o.a.kafka.clients.consumer.KafkaConsumer : [Consumer clientId=consumer-ifhEventSanitizer-1, groupId=ifhEventSanitizer] Seeking to offset 0 for partition za.local.file.singleLineGLTransactionEvent.1-0
2022-05-03 09:35:11,883 INFO [nerMoz-0-C-1] o.a.kafka.clients.producer.KafkaProducer : [Producer clientId=producer-eventsanavroifhEventSanitizer.za.local.file.singleLineGLTransactionEvent.1.0, transactionalId=eventsanavroifhEventSanitizer.za.local.file.singleLineGLTransactionEvent.1.0] Aborting incomplete transaction
2022-05-03 09:35:11,884 ERROR [nerMoz-0-C-1] essageListenerContainer$ListenerConsumer : Transaction rolled back
org.springframework.kafka.listener.ListenerExecutionFailedException: Listener method 'public boolean com.fnb.fin.ifhEventSanitizer.kafka.KafkaConsumerMoz.consume(java.util.List<org.apache.kafka.clients.consumer.ConsumerRecord<java.lang.String, java.lang.String>>,org.apache.kafka.clients.consumer.Consumer<?, ?>)' threw exception; nested exception is java.lang.IllegalStateException: No transaction is in process; possible solutions: run the template operation within the scope of a template.executeInTransaction() operation, start a transaction with @Transactional before invoking the template method, run in a transaction started by a listener container when consuming a record; nested exception is java.lang.IllegalStateException: No transaction is in process; possible solutions: run the template operation within the scope of a template.executeInTransaction() operation, start a transaction with @Transactional before invoking the template method, run in a transaction started by a listener container when consuming a record
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.decorateException(KafkaMessageListenerContainer.java:2372)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.doInvokeBatchOnMessage(KafkaMessageListenerContainer.java:2008)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeBatchOnMessageWithRecordsOrList(KafkaMessageListenerContainer.java:1978)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeBatchOnMessage(KafkaMessageListenerContainer.java:1930)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.doInvokeBatchListener(KafkaMessageListenerContainer.java:1842)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.access$2100(KafkaMessageListenerContainer.java:518)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer$1.doInTransactionWithoutResult(KafkaMessageListenerContainer.java:1749)
at org.springframework.transaction.support.TransactionCallbackWithoutResult.doInTransaction(TransactionCallbackWithoutResult.java:36)
at org.springframework.transaction.support.TransactionTemplate.execute(TransactionTemplate.java:140)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeBatchListenerInTx(KafkaMessageListenerContainer.java:1740)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeBatchListener(KafkaMessageListenerContainer.java:1722)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeListener(KafkaMessageListenerContainer.java:1704)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeIfHaveRecords(KafkaMessageListenerContainer.java:1274)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.pollAndInvoke(KafkaMessageListenerContainer.java:1266)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.run(KafkaMessageListenerContainer.java:1161)
at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
at java.base/java.lang.Thread.run(Thread.java:832)
Suppressed: org.springframework.kafka.listener.ListenerExecutionFailedException: Restored Stack Trace
at org.springframework.kafka.listener.adapter.MessagingMessageListenerAdapter.invokeHandler(MessagingMessageListenerAdapter.java:363)
at org.springframework.kafka.listener.adapter.BatchMessagingMessageListenerAdapter.invoke(BatchMessagingMessageListenerAdapter.java:180)
at org.springframework.kafka.listener.adapter.BatchMessagingMessageListenerAdapter.onMessage(BatchMessagingMessageListenerAdapter.java:172)
at org.springframework.kafka.listener.adapter.BatchMessagingMessageListenerAdapter.onMessage(BatchMessagingMessageListenerAdapter.java:61)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.doInvokeBatchOnMessage(KafkaMessageListenerContainer.java:1988)
Caused by: java.lang.IllegalStateException: No transaction is in process; possible solutions: run the template operation within the scope of a template.executeInTransaction() operation, start a transaction with @Transactional before invoking the template method, run in a transaction started by a listener container when consuming a record
at org.springframework.util.Assert.state(Assert.java:76)
at org.springframework.kafka.core.KafkaTemplate.getTheProducer(KafkaTemplate.java:657)
at org.springframework.kafka.core.KafkaTemplate.doSend(KafkaTemplate.java:569)
at org.springframework.kafka.core.KafkaTemplate.send(KafkaTemplate.java:406)
at com.fnb.fin.ifhEventSanitizer.kafka.KafkaTopicProducer.topicProducerNonAvro(KafkaTopicProducer.java:44)
at com.fnb.fin.ifhEventSanitizer.kafka.KafkaConsumerMoz.consume(KafkaConsumerMoz.java:108)
at jdk.internal.reflect.GeneratedMethodAccessor111.invoke(Unknown Source)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:564)
at org.springframework.messaging.handler.invocation.InvocableHandlerMethod.doInvoke(InvocableHandlerMethod.java:171)
at org.springframework.messaging.handler.invocation.InvocableHandlerMethod.invoke(InvocableHandlerMethod.java:120)
at org.springframework.kafka.listener.adapter.HandlerAdapter.invoke(HandlerAdapter.java:56)
at org.springframework.kafka.listener.adapter.MessagingMessageListenerAdapter.invokeHandler(MessagingMessageListenerAdapter.java:347)
at org.springframework.kafka.listener.adapter.BatchMessagingMessageListenerAdapter.invoke(BatchMessagingMessageListenerAdapter.java:180)
at org.springframework.kafka.listener.adapter.BatchMessagingMessageListenerAdapter.onMessage(BatchMessagingMessageListenerAdapter.java:172)
at org.springframework.kafka.listener.adapter.BatchMessagingMessageListenerAdapter.onMessage(BatchMessagingMessageListenerAdapter.java:61)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.doInvokeBatchOnMessage(KafkaMessageListenerContainer.java:1988)
... 16 common frames omitted

The KafkaTransactionManager can only start a transaction in a producer from one factory. Even if it could start two, you would lose the EOS guarantees, since they would be different transactions; if you perform sends with both, the sends won't be in the same transaction.
To solve this problem, use a single producer factory with a DelegatingByTypeSerializer or DelegatingByTopicSerializer.
e.g.
public ProducerFactory<String, Object> producerFactory() {
    ...
    Map<Class<?>, Serializer<?>> delegates = new LinkedHashMap<>(); // retains the order when iterating
    delegates.put(String.class, new StringSerializer());
    delegates.put(Object.class, new JsonSerializer<>());
    DelegatingByTypeSerializer dbts = new DelegatingByTypeSerializer(delegates, true);
    return new TracingProducerFactory<>(
            new DefaultKafkaProducerFactory<>(config, new StringSerializer(), dbts), tracer);
}
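With that single factory, one transactional KafkaTemplate<String, Object> (and one KafkaTransactionManager built from the same factory) can publish both payload types inside the container-started transaction. A rough sketch, assuming the delegate map routes TransactionAvroEntity to your AvroSerializer and String to the StringSerializer; the topic names and the toAvro helper are illustrative, not from the answer:

@Autowired
private KafkaTemplate<String, Object> kafkaTemplate; // built from the single producer factory

@KafkaListener(topics = "input.topic", containerFactory = "kafkaListenerContainerFactory")
public void consume(List<ConsumerRecord<String, String>> records) {
    for (ConsumerRecord<String, String> record : records) {
        // Both sends join the transaction the listener container started,
        // because both go through producers from the same factory.
        kafkaTemplate.send("avro.topic", UUID.randomUUID().toString(), toAvro(record.value()));
        kafkaTemplate.send("string.topic", UUID.randomUUID().toString(), record.value());
    }
}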

Related

Getting ProducerFencedException on producing record in Kafka listener thread

I am getting these exceptions when producing a message inside the Kafka listener container:
javax.management.InstanceAlreadyExistsException: kafka.producer:type=app-info,id=producer-tx-group.topicA.1
org.apache.kafka.common.errors.ProducerFencedException: The producer has been rejected from the broker because it tried to use an old epoch with the transactionalId
My listener looks like this:
@Transactional
@KafkaListener(...)
listener(topicA, message) {
    process(message)
    produce(topicB, notification) // use KafkaTemplate to send the message
}
My configuration looks like this:
@Bean
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory(KafkaTransactionManager kafkaTransactionManager) {
    ConcurrentKafkaListenerContainerFactory<String, String> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory());
    factory.getContainerProperties().setTransactionManager(kafkaTransactionManager);
    return factory;
}

public ProducerFactory<String, Object> producerFactory() {
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, enableIdempotence);
    DefaultKafkaProducerFactory<String, Object> factory = new DefaultKafkaProducerFactory<>(props);
    factory.setTransactionIdPrefix(transactionIdPrefix);
    return factory;
}

@Bean
public KafkaTemplate<String, Object> kafkaTemplate() {
    KafkaTemplate<String, Object> template = new KafkaTemplate<>(producerFactory());
    return template;
}

@Bean
public KafkaTransactionManager kafkaTransactionManager() {
    KafkaTransactionManager manager = new KafkaTransactionManager(producerFactory());
    return manager;
}
I know when a ProducerFencedException is thrown by Kafka, but what I am trying to figure out is where the second producer with the same transactional.id comes from.
If I set a unique transaction prefix in the KafkaTemplate it works fine:
@Bean
public KafkaTemplate<String, Object> kafkaTemplate() {
    KafkaTemplate<String, Object> template = new KafkaTemplate<>(producerFactory());
    template.setTransactionIdPrefix(MessageFormat.format("{0}-{1}", transactionIdPrefix, UUID.randomUUID().toString()));
    return template;
}
But I am trying to understand the exception here: where is the other producer with the same transactional.id being started, given that listener-started transactions follow the group.id/topic/partition pattern per the Spring docs? I am just trying this locally on a single application instance.
I found the root cause: I was creating two producer factory instances here:
public ProducerFactory<String, Object> producerFactory() {
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, enableIdempotence);
    DefaultKafkaProducerFactory<String, Object> factory = new DefaultKafkaProducerFactory<>(props);
    factory.setTransactionIdPrefix(transactionIdPrefix);
    return factory;
}
I was missing the bean configuration. Adding @Bean to the producer factory and properly autowiring it into the template and the transaction manager fixed the issue.
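For reference, a minimal sketch of the corrected wiring (assuming the same property names as above): with @Bean on the factory, Spring caches a single ProducerFactory and injects that one instance into both the template and the transaction manager, instead of each direct method call creating a fresh factory with its own producer:

@Bean
public ProducerFactory<String, Object> producerFactory() {
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, enableIdempotence);
    DefaultKafkaProducerFactory<String, Object> factory = new DefaultKafkaProducerFactory<>(props);
    factory.setTransactionIdPrefix(transactionIdPrefix);
    return factory;
}

@Bean
public KafkaTemplate<String, Object> kafkaTemplate(ProducerFactory<String, Object> producerFactory) {
    return new KafkaTemplate<>(producerFactory); // same factory instance as the transaction manager
}

@Bean
public KafkaTransactionManager<String, Object> kafkaTransactionManager(ProducerFactory<String, Object> producerFactory) {
    return new KafkaTransactionManager<>(producerFactory);
}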

How to load Kafka Consumer lazily in Spring boot?

I want to provide the group id through a command line argument, but when I tried this I got the following error:
Failed to start bean 'org.springframework.kafka.config.internalKafkaListenerEndpointRegistry'; nested exception is java.lang.IllegalStateException: No group.id found in consumer config, container properties, or @KafkaListener annotation; a group.id is required when group management is used.
That means the group id is required while the Kafka listener is loading. If I give the groupId in the consumer config file, it works properly.
So is there any way to provide the group id through the command line and have the Kafka listener load lazily, so that it is not required at program startup?
My consumer config:
@Configuration
class KafkaConsumerConfig {

    @Value("${kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Autowired
    private ArgumentModel argumentModel;

    private Logger logger = LoggerFactory.getLogger(KafkaConsumerConfig.class);

    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        logger.info("bootstrapServers : {}", bootstrapServers);
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, argumentModel.getKafkaGroupId());
        return props;
    }

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        return factory;
    }
}
@KafkaListener(... groupId = "${group.id}")
Then pass -Dgroup.id=myGroup on the command line.
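For example, a minimal listener along these lines (the topic name is illustrative) resolves the placeholder when the container is created:

@KafkaListener(topics = "my-topic", groupId = "${group.id}")
public void listen(String message) {
    // process the message
}

Started with java -Dgroup.id=myGroup -jar app.jar, the group id comes from the system property, so nothing needs to be hard-coded in the consumer config.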

Facing 'Unexpected error in AddOffsetsToTxnResponse' issue in spring kafka

I am using Spring Boot 2.2.7.RELEASE and Spring Kafka 2.3.8 (and also the Kafka chained transaction manager).
Confluent Kafka is the broker.
I am facing some issues while sending a message to Kafka. Here are the logs:
2020-07-09 08:28:13.139 ERROR [xxxxxx-component-workflow-handler,,,] 9 --- [_response-4-C-1] essageListenerContainer$ListenerConsumer : Send offsets to transaction failed
org.apache.kafka.common.KafkaException: Unexpected error in AddOffsetsToTxnResponse: The producer attempted to use a producer id which is not currently assigned to its transactional id.
at org.apache.kafka.clients.producer.internals.TransactionManager$AddOffsetsToTxnHandler.handleResponse(TransactionManager.java:1406)
at org.apache.kafka.clients.producer.internals.TransactionManager$TxnRequestHandler.onComplete(TransactionManager.java:1069)
at org.apache.kafka.clients.ClientResponse.onComplete(ClientResponse.java:109)
at org.apache.kafka.clients.NetworkClient.completeResponses(NetworkClient.java:561)
at org.apache.kafka.clients.NetworkClient.poll(NetworkClient.java:553)
at org.apache.kafka.clients.producer.internals.Sender.maybeSendAndPollTransactionalRequest(Sender.java:425)
at org.apache.kafka.clients.producer.internals.Sender.runOnce(Sender.java:311)
at org.apache.kafka.clients.producer.internals.Sender.run(Sender.java:244)
at java.base/java.lang.Thread.run(Thread.java:832)
2020-07-09 08:28:13.153 ERROR [xxxxxx-component-workflow-handler,,,] 9 --- [_response-4-C-1] o.s.k.core.DefaultKafkaProducerFactory : commitTransaction failed: CloseSafeProducer [delegate=org.apache.kafka.clients.producer.KafkaProducer#3d0f1d94, txId=xxxxxx-dagusa-Process-Handler-et3YEAYB1R1F6h-complete_fulfillment_item_response.complete_fulfillment_item_response.4]
org.apache.kafka.common.KafkaException: Cannot execute transactional method because we are in an error state
at org.apache.kafka.clients.producer.internals.TransactionManager.maybeFailWithError(TransactionManager.java:924)
at org.apache.kafka.clients.producer.internals.TransactionManager.lambda$beginCommit$2(TransactionManager.java:296)
at org.apache.kafka.clients.producer.internals.TransactionManager.handleCachedTransactionRequestResult(TransactionManager.java:1008)
at org.apache.kafka.clients.producer.internals.TransactionManager.beginCommit(TransactionManager.java:295)
at org.apache.kafka.clients.producer.KafkaProducer.commitTransaction(KafkaProducer.java:704)
at org.springframework.kafka.core.DefaultKafkaProducerFactory$CloseSafeProducer.commitTransaction(DefaultKafkaProducerFactory.java:691)
at brave.kafka.clients.TracingProducer.commitTransaction(TracingProducer.java:72)
at org.springframework.kafka.core.KafkaResourceHolder.commit(KafkaResourceHolder.java:58)
at org.springframework.kafka.transaction.KafkaTransactionManager.doCommit(KafkaTransactionManager.java:200)
at org.springframework.transaction.support.AbstractPlatformTransactionManager.processCommit(AbstractPlatformTransactionManager.java:743)
at org.springframework.transaction.support.AbstractPlatformTransactionManager.commit(AbstractPlatformTransactionManager.java:711)
at org.springframework.data.transaction.MultiTransactionStatus.commit(MultiTransactionStatus.java:74)
at org.springframework.data.transaction.ChainedTransactionManager.commit(ChainedTransactionManager.java:150)
at org.springframework.transaction.support.TransactionTemplate.execute(TransactionTemplate.java:152)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeRecordListenerInTx(KafkaMessageListenerContainer.java:1569)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeRecordListener(KafkaMessageListenerContainer.java:1546)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeListener(KafkaMessageListenerContainer.java:1288)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.pollAndInvoke(KafkaMessageListenerContainer.java:1035)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.run(KafkaMessageListenerContainer.java:949)
at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
at java.base/java.lang.Thread.run(Thread.java:832)
Caused by: org.apache.kafka.common.KafkaException: Unexpected error in AddOffsetsToTxnResponse: The producer attempted to use a producer id which is not currently assigned to its transactional id.
at org.apache.kafka.clients.producer.internals.TransactionManager$AddOffsetsToTxnHandler.handleResponse(TransactionManager.java:1406)
at org.apache.kafka.clients.producer.internals.TransactionManager$TxnRequestHandler.onComplete(TransactionManager.java:1069)
at org.apache.kafka.clients.ClientResponse.onComplete(ClientResponse.java:109)
at org.apache.kafka.clients.NetworkClient.completeResponses(NetworkClient.java:561)
at org.apache.kafka.clients.NetworkClient.poll(NetworkClient.java:553)
at org.apache.kafka.clients.producer.internals.Sender.maybeSendAndPollTransactionalRequest(Sender.java:425)
at org.apache.kafka.clients.producer.internals.Sender.runOnce(Sender.java:311)
at org.apache.kafka.clients.producer.internals.Sender.run(Sender.java:244)
... 1 common frames omitted
2020-07-09 08:28:13.157 WARN [xxxxxx-component-workflow-handler,,,] 9 --- [_response-4-C-1] o.s.k.core.DefaultKafkaProducerFactory : Error during some operation; producer removed from cache: CloseSafeProducer [delegate=org.apache.kafka.clients.producer.KafkaProducer#3d0f1d94, txId=xxxxxx-dagusa-Process-Handler-et3YEAYB1R1F6h-complete_fulfillment_item_response.complete_fulfillment_item_response.4]
2020-07-09 08:28:13.167 ERROR [xxxxxx-component-workflow-handler,,,] 9 --- [_response-4-C-1] essageListenerContainer$ListenerConsumer : Transaction rolled back
org.springframework.transaction.HeuristicCompletionException: Heuristic completion: outcome state is mixed; nested exception is org.apache.kafka.common.KafkaException: Cannot execute transactional method because we are in an error state
at org.springframework.data.transaction.ChainedTransactionManager.commit(ChainedTransactionManager.java:177)
at org.springframework.transaction.support.TransactionTemplate.execute(TransactionTemplate.java:152)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeRecordListenerInTx(KafkaMessageListenerContainer.java:1569)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeRecordListener(KafkaMessageListenerContainer.java:1546)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeListener(KafkaMessageListenerContainer.java:1288)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.pollAndInvoke(KafkaMessageListenerContainer.java:1035)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.run(KafkaMessageListenerContainer.java:949)
at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
at java.base/java.lang.Thread.run(Thread.java:832)
Caused by: org.apache.kafka.common.KafkaException: Cannot execute transactional method because we are in an error state
at org.apache.kafka.clients.producer.internals.TransactionManager.maybeFailWithError(TransactionManager.java:924)
at org.apache.kafka.clients.producer.internals.TransactionManager.lambda$beginCommit$2(TransactionManager.java:296)
at org.apache.kafka.clients.producer.internals.TransactionManager.handleCachedTransactionRequestResult(TransactionManager.java:1008)
at org.apache.kafka.clients.producer.internals.TransactionManager.beginCommit(TransactionManager.java:295)
at org.apache.kafka.clients.producer.KafkaProducer.commitTransaction(KafkaProducer.java:704)
at org.springframework.kafka.core.DefaultKafkaProducerFactory$CloseSafeProducer.commitTransaction(DefaultKafkaProducerFactory.java:691)
at brave.kafka.clients.TracingProducer.commitTransaction(TracingProducer.java:72)
at org.springframework.kafka.core.KafkaResourceHolder.commit(KafkaResourceHolder.java:58)
at org.springframework.kafka.transaction.KafkaTransactionManager.doCommit(KafkaTransactionManager.java:200)
at org.springframework.transaction.support.AbstractPlatformTransactionManager.processCommit(AbstractPlatformTransactionManager.java:743)
at org.springframework.transaction.support.AbstractPlatformTransactionManager.commit(AbstractPlatformTransactionManager.java:711)
at org.springframework.data.transaction.MultiTransactionStatus.commit(MultiTransactionStatus.java:74)
at org.springframework.data.transaction.ChainedTransactionManager.commit(ChainedTransactionManager.java:150)
... 9 common frames omitted
Caused by: org.apache.kafka.common.KafkaException: Unexpected error in AddOffsetsToTxnResponse: The producer attempted to use a producer id which is not currently assigned to its transactional id.
at org.apache.kafka.clients.producer.internals.TransactionManager$AddOffsetsToTxnHandler.handleResponse(TransactionManager.java:1406)
at org.apache.kafka.clients.producer.internals.TransactionManager$TxnRequestHandler.onComplete(TransactionManager.java:1069)
at org.apache.kafka.clients.ClientResponse.onComplete(ClientResponse.java:109)
at org.apache.kafka.clients.NetworkClient.completeResponses(NetworkClient.java:561)
at org.apache.kafka.clients.NetworkClient.poll(NetworkClient.java:553)
at org.apache.kafka.clients.producer.internals.Sender.maybeSendAndPollTransactionalRequest(Sender.java:425)
at org.apache.kafka.clients.producer.internals.Sender.runOnce(Sender.java:311)
at org.apache.kafka.clients.producer.internals.Sender.run(Sender.java:244)
... 1 common frames omitted
Here is my config.
Kafka sender config:
@Configuration
@EnableKafka
public class KafkaSenderConfig {

    @Value("${kafka.servers}")
    private String kafkaServers;

    @Value("${application.name}")
    private String applicationName;

    private static final Logger log = LoggerFactory.getLogger(KafkaSenderConfig.class);

    @Bean(value = "stringKafkaTransactionManager")
    public KafkaTransactionManager<String, String> kafkaStringTransactionManager() {
        KafkaTransactionManager<String, String> ktm = new KafkaTransactionManager<String, String>(stringProducerFactory());
        ktm.setNestedTransactionAllowed(true);
        ktm.setTransactionSynchronization(AbstractPlatformTransactionManager.SYNCHRONIZATION_ALWAYS);
        return ktm;
    }

    @Bean(value = "stringProducerFactory")
    @Primary
    public ProducerFactory<String, String> stringProducerFactory() {
        log.debug("Kafka Servers: " + kafkaServers);
        Map<String, Object> config = getConfigs();
        //config.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        DefaultKafkaProducerFactory<String, String> defaultKafkaProducerFactory = new DefaultKafkaProducerFactory<>(config);
        String randomString = applicationName.replaceAll("\\s+", "-").concat("-").concat(StringUtil.getRandomString(14)).concat("-");
        defaultKafkaProducerFactory.setTransactionIdPrefix(randomString);
        return defaultKafkaProducerFactory;
    }

    /**
     * Create a new Kafka Template for String based Messages
     *
     * @return
     */
    @Bean(value = "stringKafkaTemplate")
    @Primary
    public KafkaTemplate<String, String> stringKafkaTemplate() {
        log.debug("Creating the Kafka Template for String Producer Factory");
        return new KafkaTemplate<>(stringProducerFactory(), true);
    }

    @Bean(name = "chainedStringKafkaTransactionManager")
    @Primary
    public ChainedKafkaTransactionManager<String, String> chainedTransactionManager(JpaTransactionManager jpaTransactionManager, DataSourceTransactionManager dsTransactionManager) {
        return new ChainedKafkaTransactionManager<>(kafkaStringTransactionManager(), jpaTransactionManager, dsTransactionManager);
    }

    private Map<String, Object> getConfigs() {
        Map<String, Object> config = new ConcurrentHashMap<>();
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        config.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        // Try to send msgs out in 100ms even if the batch size is not met
        config.put(ProducerConfig.LINGER_MS_CONFIG, 100);
        config.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        config.put(ProducerConfig.ACKS_CONFIG, "all");
        return config;
    }
}
Kafka receiver config:
@Configuration
@EnableKafka
public class KafkaReceiverConfig {

    // Kafka Server Configuration
    @Value("${kafka.servers}")
    private String kafkaServers;

    // Group Identifier
    @Value("${kafka.groupId}")
    private String groupId;

    // Kafka Max Retry Attempts
    @Value("${kafka.retry.maxAttempts:3}")
    private Integer retryMaxAttempts;

    // Kafka Max Retry Interval
    @Value("${kafka.retry.interval:30000}")
    private Long retryInterval;

    // Kafka Concurrency
    @Value("${kafka.concurrency:10}")
    private Integer concurrency;

    // Kafka Poll Timeout
    @Value("${kafka.poll.timeout:300}")
    private Integer pollTimeout;

    // Kafka Consumer Offset
    @Value("${kafka.consumer.auto-offset-reset:earliest}")
    private String offset = "earliest";

    @Value("${kafka.max.records:100}")
    private Integer maxPollRecords;

    @Value("${kafka.max.poll.interval.time:500000}")
    private Integer maxPollIntervalMs;

    @Value("${kafka.max.session.timeout:60000}")
    private Integer sessionTimeoutMs;

    // String Kafka Template to send Messages
    @Autowired
    @Qualifier("stringKafkaTemplate")
    private KafkaTemplate<String, String> stringKafkaTemplate;

    // Logger
    private static final Logger log = LoggerFactory.getLogger(KafkaReceiverConfig.class);

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory(
            ChainedKafkaTransactionManager<String, String> chainedTM, MessageProducer messageProducer) {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(concurrency);
        factory.getContainerProperties().setPollTimeout(pollTimeout);
        factory.getContainerProperties().setAckMode(AckMode.RECORD);
        factory.getContainerProperties().setSyncCommits(true);
        factory.getContainerProperties().setAckOnError(false);
        factory.getContainerProperties().setTransactionManager(chainedTM);
        DefaultAfterRollbackProcessor<String, String> afterRollbackProcessor = new DefaultAfterRollbackProcessor<>(
                (record, exception) -> {
                    log.warn("failed to process kafka message (retries are exhausted). topic name:" + record.topic()
                            + " value:" + record.value());
                    messageProducer.saveFailedMessage(record, exception);
                }, new FixedBackOff(retryInterval, retryMaxAttempts));
        afterRollbackProcessor.setCommitRecovered(true);
        afterRollbackProcessor.setKafkaTemplate(stringKafkaTemplate);
        factory.setAfterRollbackProcessor(afterRollbackProcessor);
        log.debug("Kafka Receiver Config kafkaListenerContainerFactory created");
        return factory;
    }

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        log.debug("Kafka Receiver Config consumerFactory created");
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new ConcurrentHashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalMs);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeoutMs);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, offset);
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        log.debug("Kafka Receiver Config consumerConfigs created");
        return props;
    }
}
Here is my code to send a message:
@Transactional(readOnly = false)
public void initiateOrderUpdate(String jsonString) {
    // some logic here
    stringKafkaTemplate.send("some_topic", jsonString);
    // some logic here
}
Previously I was using Spring Boot 2.1.9 and Spring Kafka 2.2.9 and everything worked fine, but after upgrading to the versions above I am facing this issue.
Is there any issue with the configuration?

Setting ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG to IntegerSerializer in Spring Boot Kafka gives a class cast exception

I am using the Spring Boot with Kafka guide to set up my project, but when I run it and send a request using the KafkaTemplate, I get this exception:
org.apache.kafka.common.errors.SerializationException: Can't convert key of class java.lang.Integer to class org.apache.kafka.common.serialization.StringSerializer specified in key.serializer
Caused by: java.lang.ClassCastException: java.lang.Integer cannot be cast to java.lang.String
@EmbeddedKafka(partitions = 1)
@SpringBootTest
class KafkaApplicationTests {

    @Autowired
    private MyKafkaListener listener;

    @Autowired
    private KafkaTemplate<Integer, String> template;

    @Autowired
    private EmbeddedKafkaBroker embeddedKafka;

    @Test
    public void testSimple() throws Exception {
        template.send("annotated1", 0, "foo");
        template.flush();
        assertTrue(this.listener.latch1.await(10, TimeUnit.SECONDS));
    }

    @Configuration
    @EnableKafka
    public class Config {

        @Bean
        ConcurrentKafkaListenerContainerFactory<Integer, String> kafkaListenerContainerFactory() {
            ConcurrentKafkaListenerContainerFactory<Integer, String> factory =
                    new ConcurrentKafkaListenerContainerFactory<>();
            factory.setConsumerFactory(consumerFactory());
            return factory;
        }

        @Bean
        public ConsumerFactory<Integer, String> consumerFactory() {
            return new DefaultKafkaConsumerFactory<>(consumerConfigs());
        }

        @Bean
        public Map<String, Object> consumerConfigs() {
            Map<String, Object> props = new HashMap<>();
            // props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, embeddedKafka.getBrokersAsString());
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            return props;
        }

        @Bean
        public ProducerFactory<Integer, String> producerFactory() {
            return new DefaultKafkaProducerFactory<>(producerConfigs());
        }

        @Bean
        public Map<String, Object> producerConfigs() {
            Map<String, Object> props = new HashMap<>();
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
            // props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, embeddedKafka.getBrokersAsString());
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            return props;
        }

        @Bean
        public KafkaTemplate<Integer, String> kafkaTemplate() {
            return new KafkaTemplate<Integer, String>(producerFactory());
        }
    }
}
But when I change it to template.send("annotated1", "key-foo", "foo"); it works. I have used
@Bean
public Map<String, Object> producerConfigs() {
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    return props;
}
this configuration as well, but it is still giving me:
Caused by: java.lang.ClassCastException: java.lang.Integer cannot be cast to java.lang.String
Can someone please help me? Thanks.
You need to add the main application class and the Config class to the @SpringBootTest:
@SpringBootTest(classes = { So61985794Application.class, So61985794ApplicationTests.Config.class })
to override Boot's normal configuration.

Problems adding multiple KafkaListenerContainerFactories

Hi, I'm currently dabbling in Spring Kafka and have succeeded in adding a single KafkaListenerContainerFactory to my listener. Now I'd like to add multiple KafkaListenerContainerFactories (one for a topic whose messages are in JSON, another for strings). See the code below:
@EnableKafka
@Configuration
public class KafkaConsumersConfig {

    private final KafkaConfiguration kafkaConfiguration;

    @Autowired
    public KafkaConsumersConfig(KafkaConfiguration kafkaConfiguration) {
        this.kafkaConfiguration = kafkaConfiguration;
    }

    @Bean
    public KafkaListenerContainerFactory<?> kafkaJsonListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Record> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(jsonConsumerFactory());
        factory.setConcurrency(3);
        factory.setAutoStartup(true);
        return factory;
    }

    @Bean
    public ConsumerFactory<String, Record> jsonConsumerFactory() {
        JsonDeserializer<Record> jsonDeserializer = new JsonDeserializer<>(Record.class);
        return new DefaultKafkaConsumerFactory<>(jsonConsumerConfigs(), new StringDeserializer(), jsonDeserializer);
    }

    @Bean
    public Map<String, Object> jsonConsumerConfigs() {
        Map<String, Object> propsMap = new HashMap<>();
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConfiguration.getBrokerAddress());
        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, kafkaConfiguration.getJsonGroupId());
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, kafkaConfiguration.getAutoCommit());
        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, kafkaConfiguration.getAutoCommitInterval());
        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, kafkaConfiguration.getSessionTimeout());
        return propsMap;
    }

    @Bean
    public KafkaListenerContainerFactory<?> kafkaFileListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(fileConsumerFactory());
        factory.setConcurrency(3);
        factory.setAutoStartup(true);
        return factory;
    }

    @Bean
    public ConsumerFactory<String, String> fileConsumerFactory() {
        return new DefaultKafkaConsumerFactory<>(fileConsumerConfigs());
    }

    @Bean
    public Map<String, Object> fileConsumerConfigs() {
        Map<String, Object> propsMap = new HashMap<>();
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConfiguration.getBrokerAddress());
        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, kafkaConfiguration.getFileGroupId());
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, kafkaConfiguration.getAutoCommit());
        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, kafkaConfiguration.getAutoCommitInterval());
        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, kafkaConfiguration.getSessionTimeout());
        return propsMap;
    }
}
Running this gives me the following error:
Description:
Parameter 1 of method kafkaListenerContainerFactory in org.springframework.boot.autoconfigure.kafka.KafkaAnnotationDrivenConfiguration required a bean of type 'org.springframework.kafka.core.ConsumerFactory' that could not be found.
- Bean method 'kafkaConsumerFactory' in 'KafkaAutoConfiguration' not loaded because @ConditionalOnMissingBean (types: org.springframework.kafka.core.ConsumerFactory; SearchStrategy: all) found beans 'jsonConsumerFactory', 'fileConsumerFactory'
Action:
Consider revisiting the conditions above or defining a bean of type 'org.springframework.kafka.core.ConsumerFactory' in your configuration.
What am I doing wrong?
Looks like you are not relying on Spring Boot's Kafka auto-configuration.
Spring Boot provides in the KafkaAutoConfiguration:

@Bean
@ConditionalOnMissingBean(ConsumerFactory.class)
public ConsumerFactory<?, ?> kafkaConsumerFactory() {

Since you have jsonConsumerFactory and fileConsumerFactory, they override the one provided by the auto-configuration.
But on the other hand, in the KafkaAnnotationDrivenConfiguration, none of your factories can be applied:

@Bean
@ConditionalOnMissingBean(name = "kafkaListenerContainerFactory")
public ConcurrentKafkaListenerContainerFactory<?, ?> kafkaListenerContainerFactory(
        ConcurrentKafkaListenerContainerFactoryConfigurer configurer,
        ConsumerFactory<Object, Object> kafkaConsumerFactory) {

because your ConsumerFactory beans are not of the ConsumerFactory<Object, Object> type.
So, either:
exclude KafkaAutoConfiguration from the Spring Boot auto-configuration by adding the following to the application properties file:
spring.autoconfigure.exclude=org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration
or rename one of your KafkaListenerContainerFactory beans to kafkaListenerContainerFactory to override the one in Boot (see the sketch below),
or make one of the ConsumerFactory beans a ConsumerFactory<Object, Object>.
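For illustration, the rename option could look roughly like this (a sketch reusing jsonConsumerFactory from the question; Boot then backs off because a bean named kafkaListenerContainerFactory already exists):

@Bean(name = "kafkaListenerContainerFactory")
public ConcurrentKafkaListenerContainerFactory<String, Record> kafkaListenerContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<String, Record> factory = new ConcurrentKafkaListenerContainerFactory<>();
    // the JSON consumer factory from the question; Boot's auto-configured factory is no longer needed
    factory.setConsumerFactory(jsonConsumerFactory());
    factory.setConcurrency(3);
    return factory;
}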
I have achieved this with the code below and it is working fine for me.
// LISTENER 1
@Bean
@ConditionalOnMissingBean(name = "yourListenerFactory1")
public ConsumerFactory<String, YourCustomObject1> yourConsumerFactory1() {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "YOUR-GROUP-1");
    return new DefaultKafkaConsumerFactory<>(props, new StringDeserializer(),
            new JsonDeserializer<>(YourCustomObject1.class));
}

@Bean(name = "yourListenerFactory1")
public ConcurrentKafkaListenerContainerFactory<String, YourCustomObject1> yourListenerFactory1() {
    ConcurrentKafkaListenerContainerFactory<String, YourCustomObject1> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(yourConsumerFactory1());
    ContainerProperties containerProperties = factory.getContainerProperties();
    containerProperties.setPollTimeout(...);
    containerProperties.setAckMode(AckMode...);
    return factory;
}

// LISTENER 2
@Bean
@ConditionalOnMissingBean(name = "yourListenerFactory2")
public ConsumerFactory<String, YourCustomObject2> yourConsumerFactory2() {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "YOUR-GROUP-2");
    return new DefaultKafkaConsumerFactory<>(props, new StringDeserializer(),
            new JsonDeserializer<>(YourCustomObject2.class));
}

@Bean(name = "yourListenerFactory2")
public ConcurrentKafkaListenerContainerFactory<String, YourCustomObject2> yourListenerFactory2() {
    ConcurrentKafkaListenerContainerFactory<String, YourCustomObject2> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(yourConsumerFactory2());
    ContainerProperties containerProperties = factory.getContainerProperties();
    containerProperties.setPollTimeout(...);
    containerProperties.setAckMode(AckMode...);
    return factory;
}
Also, I have set the spring.autoconfigure.exclude property, which is a must:
spring.autoconfigure.exclude=org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration
This is my consumer config.
Consumer 1
@KafkaListener(id = "your-consumer-1",
        topicPattern = "your-topic-1",
        containerFactory = "yourListenerFactory1")
public void consumer1(YourCustomObject1 data,
        Acknowledgment acknowledgment,
        @Header(KafkaHeaders.RECEIVED_PARTITION_ID) List<Integer> partitions,
        @Header(KafkaHeaders.RECEIVED_TOPIC) List<String> topics,
        @Header(KafkaHeaders.OFFSET) List<Long> offsets) throws Exception { ... }
Consumer 2
@KafkaListener(id = "your-consumer-2",
        topicPattern = "your-topic-2",
        containerFactory = "yourListenerFactory2")
public void consumer2(YourCustomObject2 data,
        Acknowledgment acknowledgment,
        @Header(KafkaHeaders.RECEIVED_PARTITION_ID) List<Integer> partitions,
        @Header(KafkaHeaders.RECEIVED_TOPIC) List<String> topics,
        @Header(KafkaHeaders.OFFSET) List<Long> offsets) throws Exception { ... }
Also, my Kafka template was:
@Autowired
KafkaTemplate<String, Object> kafkaTemplate;
You can specify the container factory in each @KafkaListener definition as follows:
@KafkaListener(topics = "fileTopic", containerFactory = "kafkaFileListenerContainerFactory")
public void fileConsumer(...) { ... }

@KafkaListener(topics = "jsonTopic", containerFactory = "kafkaJsonListenerContainerFactory")
public void jsonConsumer(...) { ... }
