RecoveryCallback is invoked twice when retrying a message in Spring Kafka 2.2.0 - spring-boot

I retry a message n times in the main topic and, once the max attempts are exhausted, publish it to the DLT topic. When the attempts are exhausted, the RecoveryCallback is invoked, where I throw a custom exception so that the error handler (SeekToCurrentErrorHandler) handles the error and publishes the message to the DLT topic. But after n retries it enters the RecoveryCallback and throws the exception; instead of the error handler being called, it retries another n times, enters the RecoveryCallback again, and only then handles the error in the error handler. I have tried many examples but nothing worked.
The flow looks like this:
n retries -> RecoveryCallback -> another n retries -> RecoveryCallback -> error handler.
@Configuration
public class KafkaQueueConfiguration {

    private static Logger log = LoggerFactory.getLogger(KafkaQueueConfiguration.class);

    @Autowired
    private StringRedisTemplate redisTemplate;
    @Value("${bootstrap.ip}")
    private String consumerBootstrap;
    @Value("${group.id}")
    private String consumerGroupId;
    @Value("${consumer.offset.type}")
    private String autoOffsetType;
    @Value("${kafka.backoff.interval}")
    private Long fixedInterval;
    @Value("${kafka.maximum.poll.records}")
    private Integer maxPollRecords;
    @Value("${bank-notification-sasl-jaas-config}")
    private String saslJaasConfig;
    @Value("${bank-notification-json-type-mapping}")
    private String jsonTypeMapping;
    @Value("${bank-notification-dejson-delegate}")
    private String deJsonDelegate;
    @Value("${kafka.topics.bank.notification.dlt}")
    private String dlqName;
    @Value("${kafka-consumer-saslMechanism}")
    private String saslMechanism;
    @Value("${kafka-consumer-securityProtocol}")
    private String securityProtocol;
    @Value("${heartbeat-interval-ms}")
    private String heartbeatInterval;
    @Value("${session-timeout-ms}")
    private String sessionTimeout;
    @Value("${max-poll-interval-ms}")
    private Integer maxPollInterval;
    @Value("${max-retry-attempt-ms}")
    private Integer maxRetryAttempts;
    @Value("${retry-interval-ms}")
    private long retryInterval;

    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, consumerBootstrap);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // The JSON delegate is configured via ErrorHandlingDeserializer.VALUE_DESERIALIZER_CLASS below;
        // also putting JsonDeserializer under VALUE_DESERIALIZER_CLASS_CONFIG would overwrite this entry.
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ErrorHandlingDeserializer.class);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetType);
        props.put(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, fixedInterval);
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollInterval);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
        props.put(SaslConfigs.SASL_JAAS_CONFIG, saslJaasConfig);
        props.put(JsonDeserializer.TYPE_MAPPINGS, jsonTypeMapping);
        props.put(SaslConfigs.SASL_MECHANISM, saslMechanism);
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, securityProtocol);
        props.put(ErrorHandlingDeserializer.VALUE_DESERIALIZER_CLASS, deJsonDelegate);
        props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, heartbeatInterval);
        props.put(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, SslConfigs.DEFAULT_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM);
        return props;
    }

    @Bean
    public ConsumerFactory<Object, Object> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<Object, Object> kafkaListenerContainerFactory(
            ConcurrentKafkaListenerContainerFactoryConfigurer configurer,
            ConsumerFactory<Object, Object> kafkaConsumerFactory, KafkaTemplate<Object, Object> template) {
        ConcurrentKafkaListenerContainerFactory<Object, Object> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
        factory.getContainerProperties().setAckOnError(false);
        factory.setRetryTemplate(retryTemplate());
        factory.setRecoveryCallback(context -> {
            log.info("Recovery callback");
            Acknowledgment ack = (Acknowledgment) context.getAttribute(RetryingMessageListenerAdapter.CONTEXT_ACKNOWLEDGMENT);
            log.info("Manually committed..");
            log.info("RetryCount :: {} and last retry exception :: {}", context.getRetryCount(), context.getLastThrowable().getCause().getMessage());
            log.info("Retry records :: {}", context.getAttribute(RetryingMessageListenerAdapter.CONTEXT_RECORD));
            ConsumerRecord consumerRecord = (ConsumerRecord) context.getAttribute(RetryingMessageListenerAdapter.CONTEXT_RECORD);
            EftTransactionDetail eftTransactionDetail = (EftTransactionDetail) consumerRecord.value();
            log.info("Retrying records :: {}", eftTransactionDetail);
            redisTemplate.opsForValue().setIfAbsent(BankNotificationsConstants.SMS_REDIS_PREFIX + eftTransactionDetail.getAtdEntryId(), eftTransactionDetail.getAtdEntryId());
            ack.acknowledge();
            throw new ValidationException(SystemTag.HG, context.getLastThrowable().getCause().getMessage(), context.getLastThrowable().getCause().getMessage());
        });
        factory.setErrorHandler(errorHandler(publisher(template)));
        configurer.configure(factory, kafkaConsumerFactory);
        return factory;
    }

    @Bean
    public SeekToCurrentErrorHandler errorHandler(DeadLetterPublishingRecoverer deadLetterPublishingRecoverer) {
        return new SeekToCurrentErrorHandler(deadLetterPublishingRecoverer, 0); // maxFailures set to 0
    }

    @Bean
    public DeadLetterPublishingRecoverer publisher(KafkaTemplate<Object, Object> template) {
        return new DeadLetterPublishingRecoverer(template);
    }

    public RetryTemplate retryTemplate() {
        RetryTemplate retryTemplate = new RetryTemplate();
        retryTemplate.setRetryPolicy(new SimpleRetryPolicy(maxRetryAttempts));
        FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
        backOffPolicy.setBackOffPeriod(retryInterval);
        retryTemplate.setBackOffPolicy(backOffPolicy);
        return retryTemplate;
    }
}

Related

KafkaConsumer With Multiple Different Avro Producers And Transactions

I have a single Kafka consumer that consumes a string. Based on the string, we convert it to different Avro objects and publish them to different topics. We require EOS (exactly-once semantics), and the issue we are getting is that the producer marked with @Primary works, but the one without @Primary fails with the error below. Is there any way to accommodate both?
KafkaConsumer
@Configuration
public class KafkaConsumerConfig {

    @Value("${kafka.server}")
    String server;
    @Value("${kafka.consumer.groupid}")
    String groupid;
    @Autowired
    Tracer tracer;

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> config = new HashMap<>();
        config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, server);
        config.put(ConsumerConfig.GROUP_ID_CONFIG, groupid);
        config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        config.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        config.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 120000);
        config.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 10000);
        //config.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 15000);
        return new TracingConsumerFactory<>(new DefaultKafkaConsumerFactory<>(config), tracer);
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory(
            KafkaAwareTransactionManager<Object, Object> transactionManager) {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<String, String>();
        factory.setConsumerFactory(consumerFactory());
        factory.setAutoStartup(false);
        factory.setConcurrency(2);
        factory.setBatchListener(true);
        factory.getContainerProperties().setAckMode(AckMode.BATCH);
        factory.getContainerProperties().setEosMode(EOSMode.ALPHA);
        factory.getContainerProperties().setTransactionManager(transactionManager);
        return factory;
    }
}
KafkaProducer 1
@Configuration
public class KafkaProducerConfig {

    @Value("${kafka.server}")
    String server;
    @Autowired
    public Tracer tracer;

    String tranId = "eventsanavro";

    @Bean(name = "transactionalProducerFactoryAvro")
    public ProducerFactory<String, TransactionAvroEntity> producerFactoryavro() {
        Map<String, Object> config = new HashMap<>();
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, server);
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, AvroSerializer.class.getName());
        config.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
        config.put(ProducerConfig.ACKS_CONFIG, "all");
        config.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, tranId);
        config.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
        config.put(ProducerConfig.LINGER_MS_CONFIG, "200");
        config.put(ProducerConfig.BATCH_SIZE_CONFIG, Integer.toString(256 * 1024));
        config.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 120000);
        config.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 60000);
        config.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 5);
        config.put(ProducerConfig.BUFFER_MEMORY_CONFIG, Integer.toString(32768 * 1024));
        return new TracingProducerFactory<>(new DefaultKafkaProducerFactory<>(config), tracer);
    }

    @Qualifier("transactionalProducerFactoryAvro")
    @Bean(name = "transactionalKafkaTemplateAvro")
    public KafkaTemplate<String, TransactionAvroEntity> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactoryavro());
    }

    @Qualifier("transactionalProducerFactoryAvro")
    @Bean(name = "transactionalKafkaTransactionManagerAvro")
    public KafkaAwareTransactionManager<?, ?> kafkaTransactionManager(
            ProducerFactory<String, TransactionAvroEntity> producerFactory) {
        return new KafkaTransactionManager<>(producerFactory);
    }
}
KafkaProducer 2
@Configuration
public class KafkaProducerNonAvroConfig {

    @Value("${kafka.server}")
    String server;
    @Autowired
    public Tracer tracer;

    String tranId = "eventsannonavro";

    @Primary
    @Bean(name = "transactionalProducerFactoryNonAvro")
    public ProducerFactory<String, String> producerFactoryNonAvro() {
        Map<String, Object> config = new HashMap<>();
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, server);
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        config.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
        config.put(ProducerConfig.ACKS_CONFIG, "all");
        config.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, tranId);
        config.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
        config.put(ProducerConfig.LINGER_MS_CONFIG, "200");
        config.put(ProducerConfig.BATCH_SIZE_CONFIG, Integer.toString(256 * 1024));
        config.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 120000);
        config.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 60000);
        config.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 5);
        config.put(ProducerConfig.BUFFER_MEMORY_CONFIG, Integer.toString(32768 * 1024));
        return new TracingProducerFactory<>(new DefaultKafkaProducerFactory<>(config), tracer);
    }

    @Primary
    @Qualifier("transactionalProducerFactoryNonAvro")
    @Bean(name = "transactionalKafkaTemplateNonAvro")
    public KafkaTemplate<String, String> kafkatemplate() {
        return new KafkaTemplate<>(producerFactoryNonAvro());
    }

    @Primary
    @Qualifier("transactionalProducerFactoryNonAvro")
    @Bean(name = "transactionalKafkaTransactionManagerNonAvro")
    public KafkaAwareTransactionManager<?, ?> kafkaTransactionManager(ProducerFactory<String, String> producerFactory) {
        return new KafkaTransactionManager<>(producerFactory);
    }
}
ProducerWrapper
@Service
public class KafkaTopicProducer {

    @Autowired
    private KafkaTemplate<String, TransactionAvroEntity> kafkaTemplate;
    @Autowired
    private KafkaTemplate<String, String> kafkaProducerNonAvrokafkaTemplate;

    public void topicProducerAvro(TransactionAvroEntity payload, String topic, Headers headers) {
        ProducerRecord<String, TransactionAvroEntity> producerRecord = new ProducerRecord<String, TransactionAvroEntity>(
                topic, null, UUID.randomUUID().toString(), payload, headers);
        kafkaTemplate.send(producerRecord);
    }

    public void kafkaAvroFlush() {
        kafkaTemplate.flush();
    }

    public void topicProducerNonAvro(String payload, String topic, Headers headers) {
        ProducerRecord<String, String> producerRecord = new ProducerRecord<String, String>(topic, null,
                UUID.randomUUID().toString(), payload, headers);
        kafkaProducerNonAvrokafkaTemplate.send(producerRecord);
    }

    public void kafkaNonAvroFlush() {
        kafkaProducerNonAvrokafkaTemplate.flush();
    }
}
ERROR
Caused by: java.lang.IllegalStateException: No transaction is in process; possible solutions: run the template operation within the scope of a template.executeInTransaction() operation, start a transaction with @Transactional before invoking the template method, run in a transaction started by a listener container when consuming a record
Full Stack Trace
2022-05-03 09:35:11,358 INFO [nerMoz-0-C-1] o.a.kafka.clients.consumer.KafkaConsumer : [Consumer clientId=consumer-ifhEventSanitizer-1, groupId=ifhEventSanitizer] Seeking to offset 0 for partition za.local.file.singleLineGLTransactionEvent.1-0
2022-05-03 09:35:11,883 INFO [nerMoz-0-C-1] o.a.kafka.clients.producer.KafkaProducer : [Producer clientId=producer-eventsanavroifhEventSanitizer.za.local.file.singleLineGLTransactionEvent.1.0, transactionalId=eventsanavroifhEventSanitizer.za.local.file.singleLineGLTransactionEvent.1.0] Aborting incomplete transaction
2022-05-03 09:35:11,884 ERROR [nerMoz-0-C-1] essageListenerContainer$ListenerConsumer : Transaction rolled back
org.springframework.kafka.listener.ListenerExecutionFailedException: Listener method 'public boolean com.fnb.fin.ifhEventSanitizer.kafka.KafkaConsumerMoz.consume(java.util.List<org.apache.kafka.clients.consumer.ConsumerRecord<java.lang.String, java.lang.String>>,org.apache.kafka.clients.consumer.Consumer<?, ?>)' threw exception; nested exception is java.lang.IllegalStateException: No transaction is in process; possible solutions: run the template operation within the scope of a template.executeInTransaction() operation, start a transaction with @Transactional before invoking the template method, run in a transaction started by a listener container when consuming a record; nested exception is java.lang.IllegalStateException: No transaction is in process; possible solutions: run the template operation within the scope of a template.executeInTransaction() operation, start a transaction with @Transactional before invoking the template method, run in a transaction started by a listener container when consuming a record
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.decorateException(KafkaMessageListenerContainer.java:2372)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.doInvokeBatchOnMessage(KafkaMessageListenerContainer.java:2008)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeBatchOnMessageWithRecordsOrList(KafkaMessageListenerContainer.java:1978)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeBatchOnMessage(KafkaMessageListenerContainer.java:1930)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.doInvokeBatchListener(KafkaMessageListenerContainer.java:1842)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.access$2100(KafkaMessageListenerContainer.java:518)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer$1.doInTransactionWithoutResult(KafkaMessageListenerContainer.java:1749)
at org.springframework.transaction.support.TransactionCallbackWithoutResult.doInTransaction(TransactionCallbackWithoutResult.java:36)
at org.springframework.transaction.support.TransactionTemplate.execute(TransactionTemplate.java:140)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeBatchListenerInTx(KafkaMessageListenerContainer.java:1740)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeBatchListener(KafkaMessageListenerContainer.java:1722)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeListener(KafkaMessageListenerContainer.java:1704)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeIfHaveRecords(KafkaMessageListenerContainer.java:1274)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.pollAndInvoke(KafkaMessageListenerContainer.java:1266)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.run(KafkaMessageListenerContainer.java:1161)
at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
at java.base/java.lang.Thread.run(Thread.java:832)
Suppressed: org.springframework.kafka.listener.ListenerExecutionFailedException: Restored Stack Trace
at org.springframework.kafka.listener.adapter.MessagingMessageListenerAdapter.invokeHandler(MessagingMessageListenerAdapter.java:363)
at org.springframework.kafka.listener.adapter.BatchMessagingMessageListenerAdapter.invoke(BatchMessagingMessageListenerAdapter.java:180)
at org.springframework.kafka.listener.adapter.BatchMessagingMessageListenerAdapter.onMessage(BatchMessagingMessageListenerAdapter.java:172)
at org.springframework.kafka.listener.adapter.BatchMessagingMessageListenerAdapter.onMessage(BatchMessagingMessageListenerAdapter.java:61)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.doInvokeBatchOnMessage(KafkaMessageListenerContainer.java:1988)
Caused by: java.lang.IllegalStateException: No transaction is in process; possible solutions: run the template operation within the scope of a template.executeInTransaction() operation, start a transaction with @Transactional before invoking the template method, run in a transaction started by a listener container when consuming a record
at org.springframework.util.Assert.state(Assert.java:76)
at org.springframework.kafka.core.KafkaTemplate.getTheProducer(KafkaTemplate.java:657)
at org.springframework.kafka.core.KafkaTemplate.doSend(KafkaTemplate.java:569)
at org.springframework.kafka.core.KafkaTemplate.send(KafkaTemplate.java:406)
at com.fnb.fin.ifhEventSanitizer.kafka.KafkaTopicProducer.topicProducerNonAvro(KafkaTopicProducer.java:44)
at com.fnb.fin.ifhEventSanitizer.kafka.KafkaConsumerMoz.consume(KafkaConsumerMoz.java:108)
at jdk.internal.reflect.GeneratedMethodAccessor111.invoke(Unknown Source)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:564)
at org.springframework.messaging.handler.invocation.InvocableHandlerMethod.doInvoke(InvocableHandlerMethod.java:171)
at org.springframework.messaging.handler.invocation.InvocableHandlerMethod.invoke(InvocableHandlerMethod.java:120)
at org.springframework.kafka.listener.adapter.HandlerAdapter.invoke(HandlerAdapter.java:56)
at org.springframework.kafka.listener.adapter.MessagingMessageListenerAdapter.invokeHandler(MessagingMessageListenerAdapter.java:347)
at org.springframework.kafka.listener.adapter.BatchMessagingMessageListenerAdapter.invoke(BatchMessagingMessageListenerAdapter.java:180)
at org.springframework.kafka.listener.adapter.BatchMessagingMessageListenerAdapter.onMessage(BatchMessagingMessageListenerAdapter.java:172)
at org.springframework.kafka.listener.adapter.BatchMessagingMessageListenerAdapter.onMessage(BatchMessagingMessageListenerAdapter.java:61)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.doInvokeBatchOnMessage(KafkaMessageListenerContainer.java:1988)
... 16 common frames omitted
The KafkaTransactionManager can only start a transaction in a producer from one factory; even if it could start two, you would lose the EOS guarantees, since they would be different transactions, so sends to both would not be in the same transaction.
To solve this, use one producer factory with a DelegatingByTypeSerializer or DelegatingByTopicSerializer, e.g.:
public ProducerFactory<String, Object> producerFactory() {
    ...
    Map<Class<?>, Serializer<?>> delegates = new LinkedHashMap<>(); // retains the order when iterating
    delegates.put(String.class, new StringSerializer());
    delegates.put(Object.class, new JsonSerializer<>());
    DelegatingByTypeSerializer dbts = new DelegatingByTypeSerializer(delegates, true);
    return new TracingProducerFactory<>(
            new DefaultKafkaProducerFactory<>(config, new StringSerializer(), dbts), tracer);
}
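For what it's worth, a minimal sketch of how the single factory might then be used; the topic names are illustrative, and it assumes TransactionAvroEntity is mapped to its Avro serializer in the delegates map (the map above routes non-String types to JSON):
@Service
public class UnifiedTopicProducer {

    @Autowired
    private KafkaTemplate<String, Object> kafkaTemplate; // built on the single delegating factory

    // Called from the listener, so both sends join the transaction started by the container.
    public void forward(String plainPayload, TransactionAvroEntity avroPayload) {
        kafkaTemplate.send("non-avro-topic", plainPayload); // routed to StringSerializer
        kafkaTemplate.send("avro-topic", avroPayload);      // routed to the delegate mapped for this type
    }
}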

How to use "li-apache-kafka-clients" in spring boot app to send large message (above 1MB) from Kafka producer?

How can I use li-apache-kafka-clients in a Spring Boot app to send large messages (above 1 MB) from a Kafka producer to a Kafka consumer? Below is the GitHub link of li-apache-kafka-clients:
https://github.com/linkedin/li-apache-kafka-clients
I have imported the .jar file of li-apache-kafka-clients and set the configuration below for the producer:
props.put("large.message.enabled", "true");
props.put("max.message.segment.bytes", 1000 * 1024);
props.put("segment.serializer", DefaultSegmentSerializer.class.getName());
and for the consumer:
message.assembler.buffer.capacity,
max.tracked.messages.per.partition,
exception.on.message.dropped,
segment.deserializer.class
but I am still getting an error for large messages. Please help me solve it.
Below is my code; please let me know where I need to create the LiKafkaProducer:
@Configuration
public class KafkaProducerConfig {

    @Value("${kafka.boot.server}")
    private String kafkaServer;

    @Bean
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfig());
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<String, String>(producerFactory());
    }

    @Bean
    public Map<String, Object> producerConfig() {
        Map<String, Object> config = new HashMap<>();
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        config.put("bootstrap.servers", "localhost:9092");
        config.put("acks", "all");
        config.put("retries", 0);
        config.put("batch.size", 16384);
        config.put("linger.ms", 1);
        config.put("buffer.memory", 33554432);
        // The following properties are used by LiKafkaProducerImpl
        config.put("large.message.enabled", "true");
        config.put("max.message.segment.bytes", 1000 * 1024);
        config.put("segment.serializer", DefaultSegmentSerializer.class.getName());
        config.put("auditor.class", LoggingAuditor.class.getName());
        return config;
    }
}
@RestController
@RequestMapping("/kafkaProducer")
public class KafkaProducerController {

    @Autowired
    private KafkaSender sender;

    @PostMapping
    public ResponseEntity<List<Student>> sendData(@RequestBody List<Student> student) {
        sender.sendData(student);
        return new ResponseEntity<List<Student>>(student, HttpStatus.OK);
    }
}
@Service
public class KafkaSender {

    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaSender.class);

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    @Value("${kafka.topic.name}")
    private String topicName;

    public void sendData(List<Student> student) {
        Map<String, Object> headers = new HashMap<>();
        headers.put(KafkaHeaders.TOPIC, topicName);
        headers.put("payload", student.get(0));
        // Construct a JSONObject from a Map.
        JSONObject HeaderObject = new JSONObject(headers);
        System.out.println("\nMethod-2: Using new JSONObject() ==> " + HeaderObject);
        final String record = HeaderObject.toString();
        Message<String> message = MessageBuilder.withPayload(record).setHeader(KafkaHeaders.TOPIC, topicName)
                .setHeader(KafkaHeaders.MESSAGE_KEY, "Message")
                .build();
        kafkaTemplate.send(topicName, message.toString());
    }
}
You would need to implement your own ConsumerFactory and ProducerFactory to create the LiKafkaConsumer and LiKafkaProducer respectively.
You should be able to subclass the default factories provided by the framework.
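A minimal sketch of the producer side, assuming (per the project README) that LiKafkaProducerImpl takes a Properties object; this is not verified against the library, and if LiKafkaProducer does not implement Kafka's Producer interface in your version, a thin adapter would be needed:
public class LiKafkaProducerFactory implements ProducerFactory<String, String> {

    private final Map<String, Object> configs;

    public LiKafkaProducerFactory(Map<String, Object> configs) {
        this.configs = configs;
    }

    @Override
    public Producer<String, String> createProducer() {
        Properties props = new Properties();
        props.putAll(configs);
        // Assumed constructor; the LiKafka producer splits messages larger than
        // max.message.segment.bytes into segments transparently.
        return new LiKafkaProducerImpl<>(props);
    }
}
The KafkaTemplate bean would then be built on this factory instead of the DefaultKafkaProducerFactory, e.g. new KafkaTemplate<>(new LiKafkaProducerFactory(producerConfig())).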

Transaction Synchronization in spring boot not working properly

Transaction synchronization and rollback are not working properly, and I occasionally get a ProducerFencedException. Are there any mistakes in my config or code?
I have multiple instances of the Spring Boot app and one Docker broker.
Spring Boot version: 2.1.4.RELEASE
kafka sender config
@Configuration
@EnableKafka
public class KafkaSenderConfig {

    @Value("${kafka.servers}")
    private String kafkaServers;
    @Value("${application.name}")
    private String applicationName;

    @Bean(value = "stringKafkaTransactionManager")
    public KafkaTransactionManager<String, String> kafkaStringTransactionManager() {
        KafkaTransactionManager<String, String> ktm = new KafkaTransactionManager<String, String>(stringProducerFactory());
        ktm.setNestedTransactionAllowed(true);
        ktm.setTransactionSynchronization(AbstractPlatformTransactionManager.SYNCHRONIZATION_ALWAYS);
        return ktm;
    }

    @Bean(value = "stringProducerFactory")
    @Primary
    public ProducerFactory<String, String> stringProducerFactory() {
        Map<String, Object> config = new ConcurrentHashMap<>();
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        config.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        config.put(ProducerConfig.LINGER_MS_CONFIG, 100);
        config.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        config.put(ProducerConfig.ACKS_CONFIG, "all");
        DefaultKafkaProducerFactory<String, String> defaultKafkaProducerFactory = new DefaultKafkaProducerFactory<>(config);
        defaultKafkaProducerFactory.setTransactionIdPrefix("sample-trans-");
        return defaultKafkaProducerFactory;
    }

    @Bean(value = "stringKafkaTemplate")
    @Primary
    public KafkaTemplate<String, String> stringKafkaTemplate() {
        return new KafkaTemplate<>(stringProducerFactory(), true);
    }

    @Bean(name = "chainedStringKafkaTransactionManager")
    @Primary
    public ChainedKafkaTransactionManager<String, String> chainedTransactionManager(JpaTransactionManager jpaTransactionManager, DataSourceTransactionManager dsTransactionManager) {
        return new ChainedKafkaTransactionManager<>(kafkaStringTransactionManager(), jpaTransactionManager, dsTransactionManager);
    }
}
kafka receiver config
@Configuration
@EnableKafka
public class KafkaReceiverConfig {

    @Value("${kafka.servers}")
    private String kafkaServers;
    @Value("${kafka.groupId}")
    private String groupId;
    @Value("${kafka.retry.maxAttempts}")
    private Integer retryMaxAttempts;
    @Value("${kafka.retry.interval}")
    private Long retryInterval;
    @Value("${kafka.concurrency}")
    private Integer concurrency;
    @Value("${kafka.poll.timeout}")
    private Integer pollTimeout;
    @Value("${kafka.consumer.auto-offset-reset:earliest}")
    private String offset = "earliest";

    @Autowired
    private PlatformTransactionManager transactionManager;

    @Bean
    public RetryPolicy retryPolicy() {
        SimpleRetryPolicy simpleRetryPolicy = new SimpleRetryPolicy();
        simpleRetryPolicy.setMaxAttempts(retryMaxAttempts);
        return simpleRetryPolicy;
    }

    @Bean
    public BackOffPolicy backOffPolicy() {
        FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
        backOffPolicy.setBackOffPeriod(retryInterval);
        return backOffPolicy;
    }

    @Bean
    public RetryTemplate retryTemplate() {
        RetryTemplate retryTemplate = new RetryTemplate();
        retryTemplate.setRetryPolicy(retryPolicy());
        retryTemplate.setBackOffPolicy(backOffPolicy());
        return retryTemplate;
    }

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<String, String>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(concurrency);
        factory.getContainerProperties().setPollTimeout(pollTimeout);
        factory.getContainerProperties().setSyncCommits(true);
        factory.setRetryTemplate(retryTemplate());
        factory.getContainerProperties().setAckOnError(false);
        factory.getContainerProperties().setTransactionManager(transactionManager);
        return factory;
    }

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new ConcurrentHashMap<String, Object>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, offset);
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        return props;
    }

    @Bean(name = { "jsonConsumerFactory" })
    public ConsumerFactory<String, Object> jsonConsumerFactory() {
        Map<String, Object> props = new ConcurrentHashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class); // was JsonSerializer, which is not a Deserializer
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        return new DefaultKafkaConsumerFactory<>(props);
    }

    @Bean(name = { "kafkaJsonListenerContainerFactory" })
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, Object>> kafkaJsonListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Object> factory = new ConcurrentKafkaListenerContainerFactory<String, Object>();
        factory.setConsumerFactory(jsonConsumerFactory());
        factory.setConcurrency(concurrency);
        factory.getContainerProperties().setPollTimeout(pollTimeout);
        factory.getContainerProperties().setSyncCommits(true);
        return factory;
    }
}
data source config
@Configuration
@EnableTransactionManagement
@EnableJpaRepositories(basePackages = "com.sample.entity.repository")
public class DatasourceConfig {

    @Bean(name = "dataSourceProperties")
    @ConfigurationProperties("spring.datasource")
    public DataSourceProperties dataSourceProperties() {
        return new DataSourceProperties();
    }

    @Bean(name = "datasource")
    @Primary
    public DataSource dataSource(@Qualifier("dataSourceProperties") DataSourceProperties properties) {
        return properties.initializeDataSourceBuilder().type(HikariDataSource.class)
                .build();
    }

    @Bean
    public LocalContainerEntityManagerFactoryBean entityManagerFactory(@Qualifier("datasource") DataSource ds) throws PropertyVetoException {
        LocalContainerEntityManagerFactoryBean entityManagerFactory = new LocalContainerEntityManagerFactoryBean();
        entityManagerFactory.setDataSource(ds);
        entityManagerFactory.setPackagesToScan(new String[] { "com.sample.entity.domain" });
        JpaVendorAdapter jpaVendorAdapter = new HibernateJpaVendorAdapter();
        entityManagerFactory.setJpaVendorAdapter(jpaVendorAdapter);
        return entityManagerFactory;
    }

    @Bean
    public DataSourceTransactionManager dsTransactionManager(@Qualifier("datasource") DataSource ds) {
        return new DataSourceTransactionManager(ds);
    }

    @Bean
    public PlatformTransactionManager transactionManager(EntityManagerFactory entityManagerFactory) {
        return jpaTransactionManager(entityManagerFactory);
    }

    @Bean
    public JpaTransactionManager jpaTransactionManager(EntityManagerFactory entityManagerFactory) {
        JpaTransactionManager transactionManager = new JpaTransactionManager();
        transactionManager.setEntityManagerFactory(entityManagerFactory);
        return transactionManager;
    }

    @Bean
    public JdbcTemplate jdbcTemplate(@Qualifier("datasource") DataSource dataSource) {
        return new JdbcTemplate(dataSource);
    }
}
Producing a message with a transaction:
@Autowired
@Qualifier("stringKafkaTemplate")
private KafkaTemplate<String, String> stringKafkaTemplate;

@Autowired
private EmployeeRepository employeeRepository;

@Override
@Transactional
public void create(List<Employee> employees) {
    for (Employee emp : employees) {
        employeeRepository.save(emp);
        String jsonStr = JsonUtil.toString(emp);
        stringKafkaTemplate.send("employee", jsonStr);
    }
}
Receiver:
#KafkaListener(id = "employee", topics = "employee")
#Transactional(readOnly = false)
public void processRequest(#Payload String message) throws IOException {
/// its working fine
}
Property file (Kafka config):
kafka.servers=localhost:9092
kafka.groupId=xyzabc
kafka.retry.maxAttempts=3
kafka.retry.interval=300000
kafka.concurrency=10
kafka.poll.timeout=1000
It appears that your listener is receiving Employee objects and your producer is creating them - i.e. you are not calling create() from the listener.
As I said in my comment to your other question yesterday...
If you are producing messages on a listener container thread, the transactional.id is <prefix><group>.<topic>.<partition>. Since a partition cannot be assigned to multiple instances, the transactional.ids will be unique. If you are producing messages outside of the context of a container thread, the transactional.id (and hence prefix) must be unique across instances. If you are doing both, you will need 2 distinct producer factories.
@Override
@Transactional
public void create(List<Employee> employees) {
    for (Employee emp : employees) {
        employeeRepository.save(emp);
        String jsonStr = JsonUtil.toString(emp);
        stringKafkaTemplate.send("employee", jsonStr);
    }
}
So, since your transaction is on the producer side only, your transactionIdPrefix needs to be unique on each instance.
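For example, with the prefix above, a send from a listener container thread for group xyzabc, topic employee, partition 0 gets the transactional.id sample-trans-xyzabc.employee.0, which is already unique per partition; a producer-only transaction like create() gets sample-trans- plus a counter, which repeats across instances. A minimal sketch of one way to make the prefix instance-unique; the instance.id property is illustrative, not from the original:
@Value("${instance.id}") // e.g. an environment variable that differs per deployed instance
private String instanceId;

// In stringProducerFactory(), replace the fixed prefix:
defaultKafkaProducerFactory.setTransactionIdPrefix("sample-trans-" + instanceId + "-");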

How to access MessageHeaders in Kafka batch listener

I have the below auto-configuration class for Kafka:
@Configuration
@EnableKafka
@ConditionalOnClass(KafkaReceiver.class)
@ConditionalOnProperty({"spring.kafka.bootstrap-servers"})
public class KafkaAutoConfiguration<T> {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    private KafkaProperties kafkaConfig;
    private String groupId;

    @Autowired
    public void setKafkaProperties(KafkaProperties properties) {
        this.kafkaConfig = properties;
    }

    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // Overwrites the entry above; the effective value deserializer is ByteArrayDeserializer.
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                org.apache.kafka.common.serialization.ByteArrayDeserializer.class);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, kafkaConfig.getGroupId());
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        return props;
    }

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        return factory;
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaBatchListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setBatchListener(true);
        return factory;
    }

    @Bean
    public DefaultKafkaHeaderMapper headerMapper() {
        return new DefaultKafkaHeaderMapper();
    }

    @Bean("simpleReceiver")
    @ConditionalOnMissingBean(name = "simpleReceiver")
    @ConditionalOnProperty({"service.kafka.consumer.topics"})
    public KafkaReceiver simpleReceiver() {
        return new KafkaSimpleReceiver();
    }

    @Bean("batchReceiver")
    @ConditionalOnMissingBean(name = "batchReceiver")
    @ConditionalOnProperty({"service.kafka.consumer.batch-topics"})
    public KafkaReceiver batchReceiver() {
        return new KafkaBatchReceiver();
    }
}
Simple listener bean:
public class KafkaSimpleReceiver implements KafkaReceiver {

    @KafkaListener(topics = "#{'${service.kafka.consumer.topics}'.split(',')}", containerFactory = "kafkaListenerContainerFactory")
    public void receive(ConsumerRecord record, @Headers MessageHeaders headers) throws KafkaException {
    }
}
Batch listener bean:
public class KafkaBatchReceiver implements KafkaReceiver {

    @KafkaListener(topics = "#{'${service.kafka.consumer.batch-topics}'.split(',')}", containerFactory = "kafkaBatchListenerContainerFactory")
    public void receive(List<ConsumerRecord> records, @Headers MessageHeaders headers) throws KafkaException {
    }
}
The simple listener is working fine, but I am getting the following error for the batch listener. How can we access MessageHeaders in this case?
A parameter of type 'List<ConsumerRecord>' must be the only parameter (except for an optional 'Acknowledgment' and/or 'Consumer')
EDIT
This is what I did to convert the ConsumerRecord headers to MessageHeaders:
public void receive(List<ConsumerRecord> records) {
    for (ConsumerRecord record : records) {
        Map<String, Object> headersList = new HashMap<>();
        for (final Header h : record.headers()) {
            headersList.put(h.key(), new String(h.value()));
        }
        MessageHeaders headers = new MessageHeaders(headersList);
    }
}
You can get a list of Message<?>.
#KafkaListener(topics = "so54086076", id = "so54086076")
public void listen(List<Message<?>> records) {
System.out.println(records.size() + ":" + records);
}
The message payloads will be the ConsumerRecord.value(); the other ConsumerRecord properties will be in the headers.
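Applied to the batch receiver above, a hedged sketch (the topic expression is carried over from the question; KafkaHeaders.RECEIVED_TOPIC is one of the standard mapped headers):
public class KafkaBatchReceiver implements KafkaReceiver {

    @KafkaListener(topics = "#{'${service.kafka.consumer.batch-topics}'.split(',')}", containerFactory = "kafkaBatchListenerContainerFactory")
    public void receive(List<Message<?>> records) {
        for (Message<?> message : records) {
            MessageHeaders headers = message.getHeaders();
            // The framework maps the Kafka record metadata into headers per message.
            Object topic = headers.get(KafkaHeaders.RECEIVED_TOPIC);
        }
    }
}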

Unable to read same message on same topic by multiple consumers

I am publishing a message to topicX. To consume this message with multiple consumers, I have written the following ConsumerConfig (note, each consumer has a different group_id).
@EnableKafka
@Configuration
public class ConsumerConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    // Note: these are @Bean methods with a String parameter; Spring will try to
    // inject that parameter rather than use the literals passed in the calls below,
    // so both factories can end up with the same configuration/group.
    @Bean
    public Map<String, Object> consumerConfigs(String groupId) {
        Map<String, Object> props = new HashMap<>();
        props.put(BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(GROUP_ID_CONFIG, "my.groupid." + groupId);
        props.put(AUTO_OFFSET_RESET_CONFIG, "earliest");
        return props;
    }

    @Bean
    public ConsumerFactory<String, Car> consumerFactory(String groupId) {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs(groupId), new StringDeserializer(),
                new JsonDeserializer<>(Car.class));
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Car> consumerOneKafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Car> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory("one"));
        return factory;
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Car> consumerTwoKafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Car> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory("two"));
        return factory;
    }
}
And then on a specific consumer, I have added the following annotation (specifying the group id and container factory):
@KafkaListener(id = "my.groupid.one", topics = "${app.topic.topicname}", containerFactory = "consumerOneKafkaListenerContainerFactory")
But when I publish the message, only one consumer consumes it. How can I configure Kafka so that each published message is consumed by multiple consumers?
I have tried this code with 1 partition and with n partitions (where n = number of consumers) and I am still having this issue.
UPDATE
Here is my topic producer class
@Service
@Slf4j
public class TopicProducer {

    @Autowired
    private KafkaTemplate<String, TopicPayload> kafkaTemplate;

    public void send(String topicName, TopicPayload payload) {
        log.info("sending message={} to topic={}", payload, topicName);
        kafkaTemplate.send(topicName, payload);
    }
}
And here is my TopicProducerConfig
@Configuration
public class TopicProducerConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Bean
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
        return props;
    }

    @Bean
    public ProducerFactory<String, TopicPayload> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    @Bean
    public KafkaTemplate<String, TopicPayload> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}
I post a message by calling producer.send(topicName, payload).
