Transaction synchronization in Spring Boot not working properly

Transaction synchronization and rollback are not working properly, and I occasionally get a ProducerFencedException. Are there any mistakes in my config or code?
I have multiple instances of the Spring Boot application
1 Kafka broker in Docker
Spring Boot version: 2.1.4.RELEASE
Kafka sender config:
@Configuration
@EnableKafka
public class KafkaSenderConfig {

    @Value("${kafka.servers}")
    private String kafkaServers;

    @Value("${application.name}")
    private String applicationName;

    @Bean(value = "stringKafkaTransactionManager")
    public KafkaTransactionManager<String, String> kafkaStringTransactionManager() {
        KafkaTransactionManager<String, String> ktm = new KafkaTransactionManager<>(stringProducerFactory());
        ktm.setNestedTransactionAllowed(true);
        ktm.setTransactionSynchronization(AbstractPlatformTransactionManager.SYNCHRONIZATION_ALWAYS);
        return ktm;
    }

    @Bean(value = "stringProducerFactory")
    @Primary
    public ProducerFactory<String, String> stringProducerFactory() {
        Map<String, Object> config = new ConcurrentHashMap<>();
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        config.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        config.put(ProducerConfig.LINGER_MS_CONFIG, 100);
        config.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        config.put(ProducerConfig.ACKS_CONFIG, "all");
        DefaultKafkaProducerFactory<String, String> defaultKafkaProducerFactory = new DefaultKafkaProducerFactory<>(config);
        defaultKafkaProducerFactory.setTransactionIdPrefix("sample-trans-");
        return defaultKafkaProducerFactory;
    }

    @Bean(value = "stringKafkaTemplate")
    @Primary
    public KafkaTemplate<String, String> stringKafkaTemplate() {
        return new KafkaTemplate<>(stringProducerFactory(), true);
    }

    @Bean(name = "chainedStringKafkaTransactionManager")
    @Primary
    public ChainedKafkaTransactionManager<String, String> chainedTransactionManager(JpaTransactionManager jpaTransactionManager, DataSourceTransactionManager dsTransactionManager) {
        return new ChainedKafkaTransactionManager<>(kafkaStringTransactionManager(), jpaTransactionManager, dsTransactionManager);
    }
}
Kafka receiver config:
@Configuration
@EnableKafka
public class KafkaReceiverConfig {

    @Value("${kafka.servers}")
    private String kafkaServers;

    @Value("${kafka.groupId}")
    private String groupId;

    @Value("${kafka.retry.maxAttempts}")
    private Integer retryMaxAttempts;

    @Value("${kafka.retry.interval}")
    private Long retryInterval;

    @Value("${kafka.concurrency}")
    private Integer concurrency;

    @Value("${kafka.poll.timeout}")
    private Integer pollTimeout;

    @Value("${kafka.consumer.auto-offset-reset:earliest}")
    private String offset = "earliest";

    @Autowired
    private PlatformTransactionManager transactionManager;

    @Bean
    public RetryPolicy retryPolicy() {
        SimpleRetryPolicy simpleRetryPolicy = new SimpleRetryPolicy();
        simpleRetryPolicy.setMaxAttempts(retryMaxAttempts);
        return simpleRetryPolicy;
    }

    @Bean
    public BackOffPolicy backOffPolicy() {
        FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
        backOffPolicy.setBackOffPeriod(retryInterval);
        return backOffPolicy;
    }

    @Bean
    public RetryTemplate retryTemplate() {
        RetryTemplate retryTemplate = new RetryTemplate();
        retryTemplate.setRetryPolicy(retryPolicy());
        retryTemplate.setBackOffPolicy(backOffPolicy());
        return retryTemplate;
    }

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(concurrency);
        factory.getContainerProperties().setPollTimeout(pollTimeout);
        factory.getContainerProperties().setSyncCommits(true);
        factory.setRetryTemplate(retryTemplate());
        factory.getContainerProperties().setAckOnError(false);
        factory.getContainerProperties().setTransactionManager(transactionManager);
        return factory;
    }

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new ConcurrentHashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, offset);
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        return props;
    }

    @Bean(name = { "jsonConsumerFactory" })
    public ConsumerFactory<String, Object> jsonConsumerFactory() {
        Map<String, Object> props = new ConcurrentHashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class); // was JsonSerializer; a deserializer is needed here
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        return new DefaultKafkaConsumerFactory<>(props);
    }

    @Bean(name = { "kafkaJsonListenerContainerFactory" })
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, Object>> kafkaJsonListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Object> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(jsonConsumerFactory());
        factory.setConcurrency(concurrency);
        factory.getContainerProperties().setPollTimeout(pollTimeout);
        factory.getContainerProperties().setSyncCommits(true);
        return factory;
    }
}
Data source config:
@Configuration
@EnableTransactionManagement
@EnableJpaRepositories(basePackages = "com.sample.entity.repository")
public class DatasourceConfig {

    @Bean(name = "dataSourceProperties")
    @ConfigurationProperties("spring.datasource")
    public DataSourceProperties dataSourceProperties() {
        return new DataSourceProperties();
    }

    @Bean(name = "datasource")
    @Primary
    public DataSource dataSource(@Qualifier("dataSourceProperties") DataSourceProperties properties) {
        return properties.initializeDataSourceBuilder().type(HikariDataSource.class)
                .build();
    }

    @Bean
    public LocalContainerEntityManagerFactoryBean entityManagerFactory(@Qualifier("datasource") DataSource ds) throws PropertyVetoException {
        LocalContainerEntityManagerFactoryBean entityManagerFactory = new LocalContainerEntityManagerFactoryBean();
        entityManagerFactory.setDataSource(ds);
        entityManagerFactory.setPackagesToScan(new String[] {"com.sample.entity.domain"});
        JpaVendorAdapter jpaVendorAdapter = new HibernateJpaVendorAdapter();
        entityManagerFactory.setJpaVendorAdapter(jpaVendorAdapter);
        return entityManagerFactory;
    }

    @Bean
    public DataSourceTransactionManager dsTransactionManager(@Qualifier("datasource") DataSource ds) {
        return new DataSourceTransactionManager(ds);
    }

    @Bean
    public PlatformTransactionManager transactionManager(EntityManagerFactory entityManagerFactory) {
        return jpaTransactionManager(entityManagerFactory);
    }

    @Bean
    public JpaTransactionManager jpaTransactionManager(EntityManagerFactory entityManagerFactory) {
        JpaTransactionManager transactionManager = new JpaTransactionManager();
        transactionManager.setEntityManagerFactory(entityManagerFactory);
        return transactionManager;
    }

    @Bean
    public JdbcTemplate jdbcTemplate(@Qualifier("datasource") DataSource dataSource) {
        return new JdbcTemplate(dataSource);
    }
}
Producing a message with the transaction:
@Autowired
@Qualifier("stringKafkaTemplate")
private KafkaTemplate<String, String> stringKafkaTemplate;

@Autowired
private EmployeeRepository employeeRepository;

@Override
@Transactional
public void create(List<Employee> employees) {
    for (Employee emp : employees) {
        employeeRepository.save(emp);
        String jsonStr = JsonUtil.toString(emp);
        stringKafkaTemplate.send("employee", jsonStr);
    }
}
Receiver:
@KafkaListener(id = "employee", topics = "employee")
@Transactional(readOnly = false)
public void processRequest(@Payload String message) throws IOException {
    // it's working fine
}
Property file (Kafka config):
kafka.servers=localhost:9092
kafka.groupId=xyzabc
kafka.retry.maxAttempts=3
kafka.retry.interval=300000
kafka.concurrency=10
kafka.poll.timeout=1000

It appears that your listener is receiving Employee objects and your producer is creating them - i.e. you are not calling create() from the listener.
As I said in my comment to your other question yesterday...
If you are producing messages on a listener container thread, the transactional.id is <prefix><group>.<topic>.<partition>. Since a partition cannot be assigned to multiple instances, the transactional.ids will be unique. If you are producing messages outside of the context of a container thread, the transactional.id (and hence prefix) must be unique across instances. If you are doing both, you will need 2 distinct producer factories.
@Override
@Transactional
public void create(List<Employee> employees) {
    for (Employee emp : employees) {
        employeeRepository.save(emp);
        String jsonStr = JsonUtil.toString(emp);
        stringKafkaTemplate.send("employee", jsonStr);
    }
}
So, since your transaction is on the producer side only, your transactionIdPrefix needs to be unique on each instance.
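For example, a minimal sketch of making the prefix unique per instance (the kafka.instance-id property is an assumption, not part of the original config; any value that differs per deployed instance, such as a random UUID, works):
@Value("${kafka.instance-id}") // assumed property, unique per deployed instance
private String instanceId;

@Bean(value = "stringProducerFactory")
@Primary
public ProducerFactory<String, String> stringProducerFactory() {
    Map<String, Object> config = new HashMap<>();
    config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    config.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
    config.put(ProducerConfig.ACKS_CONFIG, "all");
    DefaultKafkaProducerFactory<String, String> factory = new DefaultKafkaProducerFactory<>(config);
    // appending the instance id keeps the transactional.ids distinct across instances
    factory.setTransactionIdPrefix("sample-trans-" + instanceId + "-");
    return factory;
}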

Related

EmbeddedKafka does not invoke listener when using Avro schema

I have been trying to write a simple Kafka listener unit test using KafkaEmbedded. However, my listener never gets invoked. I have been using this link for inspiration, since I also need an Avro serializer/deserializer.
Below is what my test class looks like:
@ExtendWith(SpringExtension.class)
public class KafkaTest {

    public static final String TOPIC_2 = "topic";

    @Autowired
    private Service listener;

    @Autowired
    private SomeClient someClient;

    @Autowired
    private KafkaTemplate<String, Avro> template;

    @Test
    void testSimple() {
        template.send(TOPIC_2, "test", Avro.newBuilder()
                .setGameProvider("abcd")
                .setMessageName("someMessageName")
                .setRequestId(UUID.randomUUID().toString())
                .setBody(UUID.randomUUID().toString())
                .build());
        verify(someClient).register(anyString(), anyString(), anyString(), anyString());
        template.flush();
    }

    @Configuration
    @EnableKafka
    public static class Config {

        @Bean
        public EmbeddedKafkaBroker kafkaEmbedded() {
            return new EmbeddedKafkaBroker(1, true, 1, TOPIC_2);
        }

        @Bean
        public ConsumerFactory<String, Avro> createConsumerFactory() {
            Map<String, Object> props = new HashMap<>();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaEmbedded().getBrokersAsString());
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "group-1");
            props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, CustomKafkaAvroDeserializer.class);
            props.put("schema.registry.url", "not-used");
            props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
            return new DefaultKafkaConsumerFactory<>(props);
        }

        @Bean
        public ConcurrentKafkaListenerContainerFactory<String, Avro> kafkaListenerContainerFactory() {
            ConcurrentKafkaListenerContainerFactory<String, Avro> factory = new ConcurrentKafkaListenerContainerFactory<>();
            factory.setConsumerFactory(createConsumerFactory());
            return factory;
        }

        @MockBean
        private SomeClient client;

        @MockBean
        private Marshaller marshaller;

        @Bean
        public SomeService listener() {
            return new SomeService(client, marshaller, kafkaTemplate());
        }

        @Bean
        public ProducerFactory<String, Avro> producerFactory() {
            Map<String, Object> props = new HashMap<>();
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaEmbedded().getBrokersAsString());
            props.put(ProducerConfig.RETRIES_CONFIG, 1);
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, CustomKafkaAvroSerializer.class);
            props.put("schema.registry.url", "not-used");
            return new DefaultKafkaProducerFactory<>(props);
        }

        @Bean
        public KafkaTemplate<String, Avro> kafkaTemplate() {
            return new KafkaTemplate<>(producerFactory());
        }
    }
}
public class CustomKafkaAvroDeserializer extends KafkaAvroDeserializer {

    @Override
    public Object deserialize(String topic, byte[] bytes) {
        if (topic.equals("topic")) {
            this.schemaRegistry = getMockClient(SafeReport.SCHEMA$);
        }
        return super.deserialize(topic, bytes);
    }

    private static SchemaRegistryClient getMockClient(final Schema schema$) {
        return new MockSchemaRegistryClient() {
            @Override
            public synchronized Schema getById(int id) {
                return schema$;
            }
        };
    }
}
public class CustomKafkaAvroSerializer extends KafkaAvroSerializer {

    public CustomKafkaAvroSerializer() {
        super();
        super.schemaRegistry = new MockSchemaRegistryClient();
    }

    public CustomKafkaAvroSerializer(SchemaRegistryClient client) {
        super(new MockSchemaRegistryClient());
    }

    public CustomKafkaAvroSerializer(SchemaRegistryClient client, Map<String, ?> props) {
        super(new MockSchemaRegistryClient(), props);
    }
}
@Service
@EnableBinding(Sink.class)
public class SomeService {

    @KafkaListener(topics = "topic", groupId = "group-1")
    public void listen(ConsumerRecord<String, Avro> cr) {
        System.out.println(String.format("#### -> Consumed message -> %s", cr.toString()));
    }
}
My listener is never invoked when I run this test.
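One thing worth checking (an assumption on my part, not something the post confirms): the test verifies immediately after sending, but the listener runs on a separate consumer thread, so the verification can race ahead of consumption. A minimal sketch of a latch-based wait, assuming the autowired listener field is typed as SomeService:
public class SomeService {

    final CountDownLatch latch = new CountDownLatch(1); // java.util.concurrent

    @KafkaListener(topics = "topic", groupId = "group-1")
    public void listen(ConsumerRecord<String, Avro> cr) {
        System.out.println(String.format("#### -> Consumed message -> %s", cr.toString()));
        latch.countDown(); // signal the test that the record arrived
    }
}

// In the test, wait for the listener before verifying:
assertTrue(listener.latch.await(10, TimeUnit.SECONDS));
verify(someClient).register(anyString(), anyString(), anyString(), anyString());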

Spring Kafka Listener - Transaction not working properly

I am running the latest version of a Spring Boot app with Spring Kafka and MySQL as the database, and I use a ChainedKafkaTransactionManager for transaction synchronization.
I want to update the same entity object from a particular listener. When multiple messages arrive on the topic at the same time to update the same object, a transaction does not wait for the previous transaction on that object to commit, so the data becomes inconsistent.
I have tried using a pessimistic lock in the JPA repository query, with no luck.
Here is the listener where the data inconsistency happens when multiple messages arrive at the same time to update the same object:
@Autowired
private ProcessIndexRepository processIndexRepository;

@Transactional(readOnly = false)
@KafkaListener(id = "update_process", topics = "update_process")
public void update(@Payload String message) {
    ProcessModel processModel = JsonUtil.toObject(message, ProcessModel.class);
    // from the repository it will get old data instead of updated data
    ProcessIndex process = processIndexRepository.findByProcessId(processModel.getId()).get();
    if (processModel.getName() != null) {
        process.setName(processModel.getName());
    }
    if (processModel.getAge() > 0) {
        process.setAge(processModel.getAge());
    }
    processIndexRepository.save(process);
}
Kafka sender config:
@Configuration
@EnableKafka
public class KafkaSenderConfig {

    @Value("${kafka.servers}")
    private String kafkaServers;

    @Value("${application.name}")
    private String applicationName;

    @Bean(value = "stringKafkaTransactionManager")
    public KafkaTransactionManager<String, String> kafkaStringTransactionManager() {
        KafkaTransactionManager<String, String> ktm = new KafkaTransactionManager<>(stringProducerFactory());
        ktm.setNestedTransactionAllowed(true);
        ktm.setTransactionSynchronization(AbstractPlatformTransactionManager.SYNCHRONIZATION_ALWAYS);
        return ktm;
    }

    @Bean(value = "stringProducerFactory")
    @Primary
    public ProducerFactory<String, String> stringProducerFactory() {
        Map<String, Object> config = new ConcurrentHashMap<>();
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        config.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        config.put(ProducerConfig.LINGER_MS_CONFIG, 100);
        config.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        config.put(ProducerConfig.ACKS_CONFIG, "all");
        DefaultKafkaProducerFactory<String, String> defaultKafkaProducerFactory = new DefaultKafkaProducerFactory<>(config);
        defaultKafkaProducerFactory.setTransactionIdPrefix("sample-trans-");
        return defaultKafkaProducerFactory;
    }

    @Bean(value = "stringKafkaTemplate")
    @Primary
    public KafkaTemplate<String, String> stringKafkaTemplate() {
        return new KafkaTemplate<>(stringProducerFactory(), true);
    }

    @Bean(name = "chainedStringKafkaTransactionManager")
    @Primary
    public ChainedKafkaTransactionManager<String, String> chainedTransactionManager(JpaTransactionManager jpaTransactionManager, DataSourceTransactionManager dsTransactionManager) {
        return new ChainedKafkaTransactionManager<>(kafkaStringTransactionManager(), jpaTransactionManager, dsTransactionManager);
    }
}
Kafka receiver config:
@Configuration
@EnableKafka
public class KafkaReceiverConfig {

    @Value("${kafka.servers}")
    private String kafkaServers;

    @Value("${kafka.groupId}")
    private String groupId;

    @Value("${kafka.retry.maxAttempts}")
    private Integer retryMaxAttempts;

    @Value("${kafka.retry.interval}")
    private Long retryInterval;

    @Value("${kafka.concurrency}")
    private Integer concurrency;

    @Value("${kafka.poll.timeout}")
    private Integer pollTimeout;

    @Value("${kafka.consumer.auto-offset-reset:earliest}")
    private String offset = "earliest";

    @Autowired
    private PlatformTransactionManager transactionManager;

    @Bean
    public RetryPolicy retryPolicy() {
        SimpleRetryPolicy simpleRetryPolicy = new SimpleRetryPolicy();
        simpleRetryPolicy.setMaxAttempts(retryMaxAttempts);
        return simpleRetryPolicy;
    }

    @Bean
    public BackOffPolicy backOffPolicy() {
        FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
        backOffPolicy.setBackOffPeriod(retryInterval);
        return backOffPolicy;
    }

    @Bean
    public RetryTemplate retryTemplate() {
        RetryTemplate retryTemplate = new RetryTemplate();
        retryTemplate.setRetryPolicy(retryPolicy());
        retryTemplate.setBackOffPolicy(backOffPolicy());
        return retryTemplate;
    }

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(concurrency);
        factory.getContainerProperties().setPollTimeout(pollTimeout);
        factory.getContainerProperties().setSyncCommits(true);
        factory.setRetryTemplate(retryTemplate());
        factory.getContainerProperties().setAckOnError(false);
        factory.getContainerProperties().setTransactionManager(transactionManager);
        return factory;
    }

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new ConcurrentHashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, offset);
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        return props;
    }

    @Bean(name = { "jsonConsumerFactory" })
    public ConsumerFactory<String, Object> jsonConsumerFactory() {
        Map<String, Object> props = new ConcurrentHashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class); // was JsonSerializer; a deserializer is needed here
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        return new DefaultKafkaConsumerFactory<>(props);
    }

    @Bean(name = { "kafkaJsonListenerContainerFactory" })
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, Object>> kafkaJsonListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Object> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(jsonConsumerFactory());
        factory.setConcurrency(concurrency);
        factory.getContainerProperties().setPollTimeout(pollTimeout);
        factory.getContainerProperties().setSyncCommits(true);
        return factory;
    }
}
Data source config:
@Configuration
@EnableTransactionManagement
@EnableJpaRepositories(basePackages = "com.sample.entity.repository")
public class DatasourceConfig {

    @Bean(name = "dataSourceProperties")
    @ConfigurationProperties("spring.datasource")
    public DataSourceProperties dataSourceProperties() {
        return new DataSourceProperties();
    }

    @Bean(name = "datasource")
    @Primary
    public DataSource dataSource(@Qualifier("dataSourceProperties") DataSourceProperties properties) {
        return properties.initializeDataSourceBuilder().type(HikariDataSource.class)
                .build();
    }

    @Bean
    public LocalContainerEntityManagerFactoryBean entityManagerFactory(@Qualifier("datasource") DataSource ds) throws PropertyVetoException {
        LocalContainerEntityManagerFactoryBean entityManagerFactory = new LocalContainerEntityManagerFactoryBean();
        entityManagerFactory.setDataSource(ds);
        entityManagerFactory.setPackagesToScan(new String[] {"com.sample.entity.domain"});
        JpaVendorAdapter jpaVendorAdapter = new HibernateJpaVendorAdapter();
        entityManagerFactory.setJpaVendorAdapter(jpaVendorAdapter);
        return entityManagerFactory;
    }

    @Bean
    public DataSourceTransactionManager dsTransactionManager(@Qualifier("datasource") DataSource ds) {
        return new DataSourceTransactionManager(ds);
    }

    @Bean
    public PlatformTransactionManager transactionManager(EntityManagerFactory entityManagerFactory) {
        return jpaTransactionManager(entityManagerFactory);
    }

    @Bean
    public JpaTransactionManager jpaTransactionManager(EntityManagerFactory entityManagerFactory) {
        JpaTransactionManager transactionManager = new JpaTransactionManager();
        transactionManager.setEntityManagerFactory(entityManagerFactory);
        return transactionManager;
    }

    @Bean
    public JdbcTemplate jdbcTemplate(@Qualifier("datasource") DataSource dataSource) {
        return new JdbcTemplate(dataSource);
    }
}
Property file (Kafka config):
kafka.servers=localhost:9092
kafka.groupId=xyzabc
kafka.retry.maxAttempts=3
kafka.retry.interval=300000
kafka.concurrency=10
kafka.poll.timeout=1000
When updating the same object, it should wait until the transaction commits; only then should the JPA repository pick up the updated object and apply the newer changes.
The main culprit is threading, caused by the line below:
kafka.concurrency=10
When multiple threads begin and commit transactions for the same object, you have to synchronize that code block; otherwise you may not be able to maintain consistency. This will happen in any code where simultaneous updates occur for the same records.
You can avoid these race conditions by using a partition key, as in the sketch below.
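A minimal sketch (the topic name and JsonUtil call are taken from the question; keying by process id is the suggestion here): publish each update with the entity's id as the record key, so every update for the same object lands on the same partition and is processed in order by a single consumer thread.
// key the record by process id so updates to one object are serialized
String key = String.valueOf(processModel.getId());
stringKafkaTemplate.send("update_process", key, JsonUtil.toString(processModel));
With keyed records Kafka guarantees per-partition ordering, so kafka.concurrency=10 stays safe across different objects while updates to any single object never interleave.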

How to access MessageHeaders in Kafka batch listener

I have the below auto-configuration class for Kafka:
@Configuration
@EnableKafka
@ConditionalOnClass(KafkaReceiver.class)
@ConditionalOnProperty({"spring.kafka.bootstrap-servers"})
public class KafkaAutoConfiguration<T> {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    private KafkaProperties kafkaConfig;

    private String groupId;

    @Autowired
    public void setKafkaProperties(KafkaProperties properties) {
        this.kafkaConfig = properties;
    }

    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                org.apache.kafka.common.serialization.ByteArrayDeserializer.class);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, kafkaConfig.getGroupId());
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        return props;
    }

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        return factory;
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaBatchListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setBatchListener(true);
        return factory;
    }

    @Bean
    public DefaultKafkaHeaderMapper headerMapper() {
        return new DefaultKafkaHeaderMapper();
    }

    @Bean("simpleReceiver")
    @ConditionalOnMissingBean(name = "simpleReceiver")
    @ConditionalOnProperty({"service.kafka.consumer.topics"})
    public KafkaReceiver simpleReceiver() {
        return new KafkaSimpleReceiver();
    }

    @Bean("batchReceiver")
    @ConditionalOnMissingBean(name = "batchReceiver")
    @ConditionalOnProperty({"service.kafka.consumer.batch-topics"})
    public KafkaReceiver batchReceiver() {
        return new KafkaBatchReceiver();
    }
}
Simple listener bean:
public class KafkaSimpleReceiver implements KafkaReceiver {

    @KafkaListener(topics = "#{'${service.kafka.consumer.topics}'.split(',')}", containerFactory = "kafkaListenerContainerFactory")
    public void receive(ConsumerRecord record, @Headers MessageHeaders headers) throws KafkaException {
    }
}
Batch listener bean:
public class KafkaBatchReceiver implements KafkaReceiver {

    @KafkaListener(topics = "#{'${service.kafka.consumer.batch-topics}'.split(',')}", containerFactory = "kafkaBatchListenerContainerFactory")
    public void receive(List<ConsumerRecord> records, @Headers MessageHeaders headers) throws KafkaException {
    }
}
The simple listener is working fine, but I am getting the following error for the batch listener. How can we access the MessageHeaders in this case?
A parameter of type 'List<ConsumerRecord>' must be the only parameter (except for an optional 'Acknowledgment' and/or 'Consumer')
EDIT
This is what I did to convert each ConsumerRecord's headers to MessageHeaders:
public void receive(List<ConsumerRecord> records) {
    for (ConsumerRecord record : records) {
        Map<String, Object> headersList = new HashMap<>();
        for (final Header h : record.headers()) {
            headersList.put(h.key(), new String(h.value()));
        }
        MessageHeaders headers = new MessageHeaders(headersList);
    }
}
You can get a list of Message<?>.
#KafkaListener(topics = "so54086076", id = "so54086076")
public void listen(List<Message<?>> records) {
System.out.println(records.size() + ":" + records);
}
The message payloads will be the ConsumerRecord.value(); the other ConsumerRecord properties will be in the headers.
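For example, a small sketch (the listener id and the use of KafkaHeaders.RECEIVED_TOPIC are illustrative) of reading the per-record headers from each Message:
@KafkaListener(topics = "so54086076", id = "so54086076-headers")
public void listenWithHeaders(List<Message<?>> records) {
    for (Message<?> message : records) {
        MessageHeaders headers = message.getHeaders();
        // standard Kafka metadata is exposed under the KafkaHeaders constants
        System.out.println(message.getPayload() + " from "
                + headers.get(KafkaHeaders.RECEIVED_TOPIC));
    }
}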

Sending object with Kafka / Spring Boot

I just want to send my BankAccount object with a producer, and I want to consume that object. I have producer and consumer configs, but it gives me an error.
My KafkaProducerConfig:
@Configuration
public class KafkaProducerConfig {

    @Bean
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
        return props;
    }

    @Bean
    public ProducerFactory<String, BankAccount> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    @Bean
    public KafkaTemplate<String, BankAccount> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}
KafkaConsumerConfig:
@Configuration
public class KafkaConsumerConfig {

    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "json");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        return props;
    }

    @Bean
    public ConsumerFactory<String, BankAccount> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(
                consumerConfigs(),
                new StringDeserializer(),
                new JsonDeserializer<>(BankAccount.class));
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, BankAccount> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, BankAccount> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        return factory;
    }
}
It is really basic, but my controller is:
public class KafkaController {

    @Autowired
    private KafkaTemplate<String, BankAccount> kafkaTemplate;

    private static final String TOPIC = "my-topic";

    @GetMapping("/publish/{message}")
    public String postKafka(@PathVariable("message") final String message) {
        BankAccount bankAccount = new BankAccount();
        bankAccount.setBalance(120.0);
        kafkaTemplate.send(TOPIC, bankAccount);
        return "Successfully";
    }
}
I have an error in KafkaConsumerConfig, here:
return new DefaultKafkaConsumerFactory<>(
        consumerConfigs(),
        new StringDeserializer(),
        new JsonDeserializer<>(BankAccount.class));
specifically at new JsonDeserializer<>(BankAccount.class):
Diamond operator is not applicable for non-parametrized types
I did the same thing as in this reference: https://www.codenotfound.com/spring-kafka-json-serializer-deserializer-example.html
Can someone help me?
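The usual cause of that compiler message (an assumption on my part, since the imports are not shown) is that a non-generic JsonDeserializer class is being imported, such as org.apache.kafka.connect.json.JsonDeserializer, so the diamond operator has no type parameters to infer. A sketch of the fix:
// Import the generic Spring class, not the non-generic Connect one:
import org.springframework.kafka.support.serializer.JsonDeserializer;

// The original construction then compiles as written:
return new DefaultKafkaConsumerFactory<>(
        consumerConfigs(),
        new StringDeserializer(),
        new JsonDeserializer<>(BankAccount.class));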

Unable to read same message on same topic by multiple consumers

I am publishing a message to topicX. To consume this message with multiple consumers, I have written the following ConsumerConfig (note: each consumer has a different group_id):
@EnableKafka
@Configuration
public class ConsumerConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Bean
    public Map<String, Object> consumerConfigs(String groupId) {
        Map<String, Object> props = new HashMap<>();
        props.put(BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(GROUP_ID_CONFIG, "my.groupid." + groupId);
        props.put(AUTO_OFFSET_RESET_CONFIG, "earliest");
        return props;
    }

    @Bean
    public ConsumerFactory<String, Car> consumerFactory(String groupId) {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs(groupId), new StringDeserializer(),
                new JsonDeserializer<>(Car.class));
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Car> consumerOneKafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Car> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory("one"));
        return factory;
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Car> consumerTwoKafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Car> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory("two"));
        return factory;
    }
}
And then on a specific consumer, I have added the following annotation (specifying the group id and container factory):
@KafkaListener(id = "my.groupid.one", topics = "${app.topic.topicname}", containerFactory = "consumerOneKafkaListenerContainerFactory")
But when I publish the message, only one consumer consumes it. How can I configure Kafka so that each published message is consumed by multiple consumers?
I have tried this code with 1 partition and with n partitions (where n = the number of consumers), and I still have this issue.
UPDATE
Here is my topic producer class
@Service
@Slf4j
public class TopicProducer {

    @Autowired
    private KafkaTemplate<String, TopicPayload> kafkaTemplate;

    public void send(String topicName, TopicPayload payload) {
        log.info("sending message={} to topic={}", payload, topicName);
        kafkaTemplate.send(topicName, payload);
    }
}
And here is my TopicProducerConfig
@Configuration
public class TopicProducerConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Bean
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
        return props;
    }

    @Bean
    public ProducerFactory<String, TopicPayload> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    @Bean
    public KafkaTemplate<String, TopicPayload> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}
I post a message by calling producer.send(topicName, payload).
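For reference, a minimal sketch (the method names are illustrative) of what the fan-out should look like on the listener side; each distinct group id receives its own copy of every record published to the topic:
@KafkaListener(id = "my.groupid.one", topics = "${app.topic.topicname}",
        containerFactory = "consumerOneKafkaListenerContainerFactory")
public void consumeOne(Car car) {
    // group my.groupid.one gets every message
}

@KafkaListener(id = "my.groupid.two", topics = "${app.topic.topicname}",
        containerFactory = "consumerTwoKafkaListenerContainerFactory")
public void consumeTwo(Car car) {
    // group my.groupid.two independently gets every message
}
Note that in Spring Kafka the listener id doubles as the group id by default (idIsGroup is true), so the two ids above must differ for both groups to receive each message.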
