Error - RabbitTemplate publisher confirm - reply-code=403, reply-text=ACCESS_REFUSED - cannot publish to internal exchange - Spring Boot

My use case is:
Subscribe to Q1 and read messages in batches of a specified size.
Pass the collected messages on for processing.
Publish the processed messages to Q2, and ack each message on Q1 upon successful confirmation of the Q2 publish.
Code
@Component
public class EPPQ2Subscriber {

    private static final Logger LOGGER = LoggerFactory.getLogger(EPPQ2Subscriber.class);

    @Autowired
    RabbitMqConfig rabbitMqConfig;

    @Autowired
    AppConfig appConfig;

    List<Message> messageList = new ArrayList<Message>();
    List<Long> deliveryTags = new ArrayList<Long>();

    /**
     * Listener receive method, invoked when there is a message ready to read.
     * @param message - domain object the message is encapsulated in
     * @param channel - RabbitMQ client channel
     * @param messageId - TODO: delete it later
     * @param messageProperties - AMQP message properties, e.g. the delivery tag
     */
    @RabbitListener(id = "messageListener", queues = "#{rabbitMqConfig.getSubscriberQueueName()}", containerFactory = "queueListenerContainer")
    public void receiveMessage(Message message, Channel channel, @Header("id") String messageId,
            MessageProperties messageProperties) {
        LOGGER.info("Result:" + message.getClass() + ":" + message.toString());
        if (messageList.size() <= appConfig.getSubscriberChunkSize()) {
            messageList.add(message);
            deliveryTags.add(messageProperties.getDeliveryTag());
        } else {
            // Call the service here to decrypt, read the PAN, call Danger to scrub,
            // encrypt the PAN and re-pack them into messages again. After this branch,
            // messageList should hold scrubbed and encrypted messages ready to publish.
            // Here is the call to publish and ack the messages.
        }
    }
}
@Component
@Configuration
public class TopicConfiguration {

    private static final Logger LOGGER = LoggerFactory.getLogger(TopicConfiguration.class);

    @Autowired
    RabbitMqConfig rabbitMqConfig;

    @Autowired
    EPPQ2Publisher eppQ2Publisher;

    /**
     * Caching connection factory
     * @return CachingConnectionFactory
     */
    @Bean
    public CachingConnectionFactory connectionFactory() {
        CachingConnectionFactory connectionFactory = new CachingConnectionFactory(rabbitMqConfig.getPublisherHosts(),
                rabbitMqConfig.getPublisherPort());
        connectionFactory.setUsername(rabbitMqConfig.getPublisherUsername());
        connectionFactory.setPassword(rabbitMqConfig.getPublisherPassword());
        return connectionFactory;
    }

    /**
     * Bean RabbitTemplate
     * @return RabbitTemplate
     */
    @Bean
    public RabbitTemplate rabbitTemplate() {
        final RabbitTemplate rabbitTemplate = new RabbitTemplate(connectionFactory());
        rabbitTemplate.setMessageConverter(producerJackson2MessageConverter());
        RetryTemplate retryTemplate = new RetryTemplate();
        ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
        backOffPolicy.setInitialInterval(500);
        backOffPolicy.setMultiplier(10.0);
        backOffPolicy.setMaxInterval(10000);
        retryTemplate.setBackOffPolicy(backOffPolicy);
        rabbitTemplate.setRetryTemplate(retryTemplate);
        /* rabbitTemplate.setExchange(rabbitMqConfig.getPublisherTopic());
        rabbitTemplate.setRoutingKey(rabbitMqConfig.getRoutingKey()); */
        rabbitTemplate.setConfirmCallback((correlation, ack, reason) -> {
            if (correlation != null) {
                LOGGER.info("Received " + (ack ? " ack " : " nack ") + "for correlation: " + correlation);
                if (ack) {
                    // Confirmation received: ack the message on Q1 using correlation.getId().
                    eppQ2Publisher.ackMessage(Long.valueOf(correlation.getId()));
                } else {
                    // No confirmation received; nothing to do here for the retry.
                }
            }
        });
        rabbitTemplate.setReturnCallback((message, replyCode, replyText, exchange, routingKey) -> {
            LOGGER.error("Returned: " + message + "\nreplyCode: " + replyCode + "\nreplyText: " + replyText
                    + "\nexchange/rk: " + exchange + "/" + routingKey);
        });
        return rabbitTemplate;
    }

    /**
     * Bean Jackson2JsonMessageConverter
     * @return Jackson2JsonMessageConverter
     */
    @Bean
    public Jackson2JsonMessageConverter producerJackson2MessageConverter() {
        return new Jackson2JsonMessageConverter();
    }
}
public interface EPPQ2Publisher {
    public void sendMessage(Message msg, Long deliveryTag);
    public void sendMessages(List<Message> msgList, Channel channel, List<Long> deliveryTagList);
    public void ackMessage(Long deliveryTag);
}
@Component
public class EPPQ2PublisherImpl implements EPPQ2Publisher {

    @Autowired
    RabbitMqConfig rabbitMqConfig;

    @Autowired
    private RabbitTemplate rabbitTemplate;

    private Channel channel;

    /**
     * Sends an individual scrubbed and encrypted message to the publisher queue (Q2).
     * @param msg - message domain object
     * @param deliveryTag - the message delivery tag
     */
    @Override
    public void sendMessage(Message msg, Long deliveryTag) {
        rabbitTemplate.convertAndSend(rabbitMqConfig.getPublisherTopic(), rabbitMqConfig.getRoutingKey(), msg,
                new CorrelationData(deliveryTag.toString()));
    }

    /**
     * Sends a list of scrubbed and encrypted messages to the publisher queue (Q2).
     * @param msgList - list of scrubbed and encrypted messages
     * @param channel - AMQP client channel
     * @param deliveryTagList - list of incoming message delivery tags
     */
    @Override
    public void sendMessages(List<Message> msgList, Channel channel, List<Long> deliveryTagList) {
        if (this.channel == null) {
            this.channel = channel;
        }
        for (int i = 0; i < msgList.size(); i++) {
            sendMessage(msgList.get(i), deliveryTagList.get(i));
        }
    }

    /**
     * Sends an acknowledgement to the subscriber queue (Q1).
     * @param deliveryTag - deliveryTag of each individual message
     */
    @Override
    public void ackMessage(Long deliveryTag) {
        try {
            channel.basicAck(deliveryTag, false);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
Log:
org.springframework.amqp.rabbit.connection.CachingConnectionFactory: Creating cached Rabbit Channel from AMQChannel(amqp://dftp_subscriber@10.15.190.18:5672/hydra.services,2)
I expected this to be dftp_publisher, so I guess my topic configuration is not injected properly.
Error Log:
org.springframework.amqp.rabbit.core.RabbitTemplate: Executing callback RabbitTemplate$$Lambda$285/33478758 on RabbitMQ Channel: Cached Rabbit Channel: AMQChannel(amqp://dftp_subscriber@10.15.190.18:5672/hydra.services,2), conn: Proxy@1dc339f Shared Rabbit Connection: SimpleConnection@2bd7c8 [delegate=amqp://dftp_subscriber@10.15.190.18:5672/hydra.services, localPort= 55553]
org.springframework.amqp.rabbit.core.RabbitTemplate: Publishing message (Body:'{"HEADER":{"RETRY_COUNT":0,"PUBLISH_EVENT_TYPE":"AUTH"},"PAYLOAD":{"MTI":"100","MTI_REQUEST":"100","PAN":"6011000000000000","PROCCODE":"00","PROCCODE_REQUEST":"00","FROM_ACCOUNT":"00","TO_ACCOUNT":"00","TRANSACTION_AMOUNT":"000000000100","TRANSMISSION_MMDDHHMMSS":"0518202930","STAN":"000001","LOCALTIME_HHMMSS":"010054","LOCALDATE_YYMMDD":"180522","EXPIRATION_DATE_YYMM":"2302","MERCHANT_TYPE":"5311","ACQUIRING_COUNTRY_CODE":"840","POS_ENTRY_MODE":"02","POS_PIN_ENTRY_CAPABILITIES":"0","FUNCTION_CODE":"100","ACQUIRING_ID_CODE":"000000","FORWARDING_ID_CODE":"000000","RETRIEVAL_REFERENCE_NUMBER":"1410N644D597","MERCHANT_NUMBER":"601100000000596","CARD_ACCEPTOR_NAME":"Discover Acq Simulator","CARD_ACCEPTOR_CITY":"Riverwoods","CARD_ACCEPTOR_STATE":"IL","CARD_ACCEPTOR_COUNTRY":"840","CARD_ACCEPTOR_COUNTRY_3NUMERIC":"840","NRID":"123456789012345","TRANSACTION_CURRENCY_CODE":"840","POS_TERMINAL_ATTENDANCE_INDICATOR":"0","POS_PARTIAL_APPROVAL_INDICATOR":"0","POS_TERMINAL_LOCATION_INDICATOR":"0","POS_TRANSACTION_STATUS_INDICATOR":"0","POS_ECOMMERCE_TRAN_INDICATOR":"0","POS_TYPE_OF_TERMINAL_DEVICE":"0","POS_CARD_PRESENCE_INDICATOR":"0","POS_CARD_CAPTURE_CAPABILITIES_INDICATOR":"0","POS_TRANSACTION_SECURITY_INDICATOR":"0","POS_CARD_DATA_TERMINAL_INPUT_CAPABILITY_INDICATOR":"C","POS_CARDHOLDER_PRESENCE_INDICATOR":"0","DFS_POS_DATA":"0000000000C00","GEODATA_STREET_ADDRESS":"2500 LAKE COOK ROAD ","GEODATA_POSTAL_CODE":"600150000","GEODATA_COUNTY_CODE":"840","GEODATA_STORE_NUMBER":"10001","GEODATA_MALL_NAME":"DISCOVER FINANCIAL SR","ISS_REFERENCE_ID":"72967956","ISS_PROCESSOR_REFERENCE_ID":"123459875","VERSION_INDICATOR":"03141"}}' MessageProperties [headers={TypeId=com.discover.dftp.scrubber.domain.Message}, contentType=application/json, contentEncoding=UTF-8, contentLength=1642, deliveryMode=PERSISTENT, priority=0, deliveryTag=0]) on exchange [hydra.hash2Syphon.exc], routingKey = [100]
org.springframework.amqp.rabbit.connection.CachingConnectionFactory$DefaultChannelCloseLogger: Channel shutdown: channel error; protocol method: #method<channel.close>(reply-code=403, reply-text=ACCESS_REFUSED - cannot publish to internal exchange 'hydra.hash2Syphon.exc' in vhost 'hydra.services', class-id=60, method-id=40)
EDIT 2.
@Component
@Configuration
public class ListenerContainerFactory {

    static final Logger logger = LoggerFactory.getLogger(ListenerContainerFactory.class);

    @Autowired
    RabbitMqConfig rabbitMqConfig;

    @Autowired
    EPPQ2Subscriber receiver;

    @Autowired
    EPPQ2ChanelAwareSubscriber receiverChanel;

    public ListenerContainerFactory(ConfigurableApplicationContext ctx) {
        printContainerStartMsg();
    }

    private void printContainerStartMsg() {
        logger.info("----------- Scrubber Container Starts --------------");
    }

    @Bean
    public SimpleRabbitListenerContainerFactory queueListenerContainer(AbstractConnectionFactory connectionFactory,
            MessageListenerAdapter listenerAdapter) {
        connectionFactory.setAddresses(rabbitMqConfig.getSubscriberHosts());
        connectionFactory.setVirtualHost("hydra.services");
        connectionFactory.setPort(rabbitMqConfig.getSubscriberPort());
        connectionFactory.setUsername(rabbitMqConfig.getSubscriberUsername());
        connectionFactory.setPassword(rabbitMqConfig.getSubscriberPassword());
        SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory();
        factory.setConnectionFactory(connectionFactory);
        factory.setErrorHandler(errorHandler());
        return factory;
    }

    @Bean
    MessageListenerAdapter listenerAdapter(EPPQ2Subscriber receiver) {
        return new MessageListenerAdapter(receiver, "receiveMessage");
    }

    /* @Bean
    MessageListenerAdapter listenerAdapterWithChanel(EPPQ2ChanelAwareSubscriber receiverChanel) {
        return new MessageListenerAdapter(receiverChanel);
    } */

    @Bean
    public ErrorHandler errorHandler() {
        return new ConditionalRejectingErrorHandler(fatalExceptionStrategy());
    }

    @Bean
    public ScrubberFatalExceptionStrategy fatalExceptionStrategy() {
        return new ScrubberFatalExceptionStrategy();
    }
}
And the latest topic configuration:
@Component
@Configuration
public class TopicConfiguration {

    private static final Logger LOGGER = LoggerFactory.getLogger(TopicConfiguration.class);

    @Autowired
    RabbitMqConfig rabbitMqConfig;

    @Autowired
    EPPQ2Publisher eppQ2Publisher;

    /**
     * Bean Queue
     * @return Queue
     */
    @Bean
    Queue queue() {
        return new Queue(rabbitMqConfig.getPublisherQueueName(), false);
    }

    /**
     * Bean TopicExchange
     * @return TopicExchange
     */
    @Bean
    TopicExchange exchange() {
        return new TopicExchange(rabbitMqConfig.getPublisherTopic());
    }

    /**
     * Bean Binding
     * @param queue - Queue
     * @param exchange - TopicExchange
     * @return Binding
     */
    @Bean
    Binding binding(Queue queue, TopicExchange exchange) {
        return BindingBuilder.bind(queue).to(exchange).with(rabbitMqConfig.getRoutingKey());
    }

    /**
     * Caching connection factory
     * @return CachingConnectionFactory
     */
    @Bean
    public CachingConnectionFactory cachingConnectionFactory() {
        CachingConnectionFactory connectionFactory = new CachingConnectionFactory(rabbitMqConfig.getPublisherHosts(),
                rabbitMqConfig.getPublisherPort());
        connectionFactory.setUsername(rabbitMqConfig.getPublisherUsername());
        connectionFactory.setPassword(rabbitMqConfig.getPublisherPassword());
        return connectionFactory;
    }

    /**
     * Bean RabbitTemplate
     * @return RabbitTemplate
     */
    @Bean
    public RabbitTemplate rabbitTemplate() {
        final RabbitTemplate rabbitTemplate = new RabbitTemplate(cachingConnectionFactory());
        rabbitTemplate.setMessageConverter(producerJackson2MessageConverter());
        RetryTemplate retryTemplate = new RetryTemplate();
        ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
        backOffPolicy.setInitialInterval(500);
        backOffPolicy.setMultiplier(10.0);
        backOffPolicy.setMaxInterval(10000);
        retryTemplate.setBackOffPolicy(backOffPolicy);
        rabbitTemplate.setRetryTemplate(retryTemplate);
        /* rabbitTemplate.setExchange(rabbitMqConfig.getPublisherTopic());
        rabbitTemplate.setRoutingKey(rabbitMqConfig.getRoutingKey()); */
        rabbitTemplate.setConfirmCallback((correlation, ack, reason) -> {
            if (correlation != null) {
                LOGGER.info("Received " + (ack ? " ack " : " nack ") + "for correlation: " + correlation);
                if (ack) {
                    // Confirmation received: ack the message on Q1 using correlation.getId().
                    eppQ2Publisher.ackMessage(Long.valueOf(correlation.getId()));
                } else {
                    // No confirmation received; nothing to do here.
                }
            }
        });
        rabbitTemplate.setReturnCallback((message, replyCode, replyText, exchange, routingKey) -> {
            LOGGER.error("Returned: " + message + "\nreplyCode: " + replyCode + "\nreplyText: " + replyText
                    + "\nexchange/rk: " + exchange + "/" + routingKey);
        });
        return rabbitTemplate;
    }

    /**
     * Bean Jackson2JsonMessageConverter
     * @return Jackson2JsonMessageConverter
     */
    @Bean
    public Jackson2JsonMessageConverter producerJackson2MessageConverter() {
        return new Jackson2JsonMessageConverter();
    }
}

It's not clear what you are asking. If you mean the subscriber user doesn't have permission to write to that exchange, your wiring is wrong.
You don't show the subscriber configuration.
Is it possible the subscriber connection factory bean is also called connectionFactory? In that case one or the other will win; they need different bean names.
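For illustration, a minimal sketch of keeping the two sides apart with distinct bean names and qualifiers (the bean names, host strings, and the publisherTemplate method are hypothetical, not the OP's code):

import org.springframework.amqp.rabbit.connection.CachingConnectionFactory;
import org.springframework.amqp.rabbit.core.RabbitTemplate;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class DualConnectionFactoryConfig {

    // Distinct bean name for the subscriber side; the listener container
    // factory should be wired against this bean by qualifier.
    @Bean(name = "subscriberConnectionFactory")
    public CachingConnectionFactory subscriberConnectionFactory() {
        CachingConnectionFactory cf = new CachingConnectionFactory("subscriber-host", 5672);
        cf.setUsername("dftp_subscriber");
        cf.setVirtualHost("hydra.services");
        return cf;
    }

    // Distinct bean name for the publisher side.
    @Bean(name = "publisherConnectionFactory")
    public CachingConnectionFactory publisherConnectionFactory() {
        CachingConnectionFactory cf = new CachingConnectionFactory("publisher-host", 5672);
        cf.setUsername("dftp_publisher");
        cf.setVirtualHost("hydra.services");
        return cf;
    }

    // Wire the template explicitly to the publisher factory, so it cannot
    // silently pick up the subscriber's connection.
    @Bean
    public RabbitTemplate publisherTemplate(
            @Qualifier("publisherConnectionFactory") CachingConnectionFactory cf) {
        return new RabbitTemplate(cf);
    }
}

With this wiring, a log line like the one above would show amqp://dftp_publisher@... on the publishing channel instead of dftp_subscriber.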

Please also check the permissions for your user: if the exchange name is not available when the consumer/producer is initiated, it will fall back to the default exchange.
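For example, the vhost permissions can be inspected and granted with rabbitmqctl (the user and vhost names are taken from the logs above; the ".*" patterns are illustrative and should be tightened in production):

# List current per-user permissions on the vhost
rabbitmqctl list_permissions -p hydra.services

# Grant dftp_publisher configure/write/read rights (the three patterns are regexes)
rabbitmqctl set_permissions -p hydra.services dftp_publisher ".*" ".*" ".*"

Note that an exchange declared as internal refuses publishes from clients regardless of permissions, which matches the "cannot publish to internal exchange" wording in the error.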

Related

How to read messages from RabbitMQ other than by using a listener configuration?

I am attempting to implement a Spring Boot API to fetch RabbitMQ messages on demand, for asynchronous cart notifications in a UI. I already have a working implementation using a registered listener method, but I am looking for an alternative, with or without Spring.
@Component
public class Receiver {

    private CountDownLatch latch = new CountDownLatch(1);

    public void receiveMessage(String message) {
        System.out.println("Received <" + message + ">");
        latch.countDown();
    }

    public CountDownLatch getLatch() {
        return latch;
    }
}
Main class with receiver configuration:
@SpringBootApplication
public class MessagingRabbitmqApplication {

    static final String topicExchangeName = "spring-boot-exchange";
    static final String queueName = "spring-boot";

    @Bean
    Queue queue() {
        return new Queue(queueName, false);
    }

    @Bean
    TopicExchange exchange() {
        return new TopicExchange(topicExchangeName);
    }

    @Bean
    Binding binding(Queue queue, TopicExchange exchange) {
        return BindingBuilder.bind(queue).to(exchange).with("foo.bar.#");
    }

    @Bean
    SimpleMessageListenerContainer container(ConnectionFactory connectionFactory,
            MessageListenerAdapter listenerAdapter) {
        SimpleMessageListenerContainer container = new SimpleMessageListenerContainer();
        container.setConnectionFactory(connectionFactory);
        container.setQueueNames(queueName);
        container.setMessageListener(listenerAdapter);
        return container;
    }

    @Bean
    MessageListenerAdapter listenerAdapter(Receiver receiver) {
        return new MessageListenerAdapter(receiver, "receiveMessage");
    }

    public static void main(String[] args) throws InterruptedException {
        SpringApplication.run(MessagingRabbitmqApplication.class, args).close();
    }
}
My current implementation is based on: https://spring.io/guides/gs/messaging-rabbitmq/
See RabbitTemplate API:
/**
 * Receive a message if there is one from a specific queue. Returns immediately,
 * possibly with a null value.
 *
 * @param queueName the name of the queue to poll
 * @return a message or null if there is none waiting
 * @throws AmqpException if there is a problem
 */
@Nullable
Message receive(String queueName) throws AmqpException;

/**
 * Receive a message if there is one from a specific queue and convert it to a Java
 * object. Returns immediately, possibly with a null value.
 *
 * @param queueName the name of the queue to poll
 * @return a message or null if there is none waiting
 * @throws AmqpException if there is a problem
 */
@Nullable
Object receiveAndConvert(String queueName) throws AmqpException;
And the corresponding docs: https://docs.spring.io/spring-amqp/reference/html/#polling-consumer
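So an on-demand fetch needs no listener at all; a minimal sketch, assuming a queue named cart-notifications and Spring Boot's auto-configured RabbitTemplate (both names are illustrative):

import org.springframework.amqp.rabbit.core.RabbitTemplate;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class CartNotificationController {

    private final RabbitTemplate rabbitTemplate;

    public CartNotificationController(RabbitTemplate rabbitTemplate) {
        this.rabbitTemplate = rabbitTemplate;
    }

    // Polls the queue once; returns null (empty HTTP body) immediately
    // if no message is waiting.
    @GetMapping("/notifications/next")
    public Object nextNotification() {
        return rabbitTemplate.receiveAndConvert("cart-notifications");
    }
}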

How to commit an offset in Spring Kafka if the message failed and was processed by the AfterRollbackProcessor

I am using Spring Boot 2.1.9 with Spring Kafka 2.2.9.
If a message fails a number of times (defined in the afterRollbackProcessor), the consumer stops polling that record; but if the consumer is restarted, it polls the same message again and processes it.
I don't want the message to be polled again; what is the best way to prevent that?
Here is my config:
@Configuration
@EnableKafka
public class KafkaReceiverConfig {

    // Kafka server configuration
    @Value("${kafka.servers}")
    private String kafkaServers;

    // Group identifier
    @Value("${kafka.groupId}")
    private String groupId;

    // Kafka max retry attempts
    @Value("${kafka.retry.maxAttempts:5}")
    private Integer retryMaxAttempts;

    // Kafka max retry interval
    @Value("${kafka.retry.interval:180000}")
    private Long retryInterval;

    // Kafka concurrency
    @Value("${kafka.concurrency:10}")
    private Integer concurrency;

    // Kafka poll timeout
    @Value("${kafka.poll.timeout:300}")
    private Integer pollTimeout;

    // Kafka consumer offset
    @Value("${kafka.consumer.auto-offset-reset:earliest}")
    private String offset = "earliest";

    @Value("${kafka.max.records:100}")
    private Integer maxPollRecords;

    @Value("${kafka.max.poll.interval.time:500000}")
    private Integer maxPollIntervalMs;

    @Value("${kafka.max.session.timeout:60000}")
    private Integer sessionTimoutMs;

    // Logger
    private static final Logger log = LoggerFactory.getLogger(KafkaReceiverConfig.class);

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory(
            ChainedKafkaTransactionManager<String, String> chainedTM, MessageProducer messageProducer) {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(concurrency);
        factory.getContainerProperties().setPollTimeout(pollTimeout);
        factory.getContainerProperties().setAckMode(AckMode.RECORD);
        factory.getContainerProperties().setSyncCommits(true);
        factory.getContainerProperties().setAckOnError(false);
        factory.getContainerProperties().setTransactionManager(chainedTM);
        AfterRollbackProcessor<String, String> afterRollbackProcessor = new DefaultAfterRollbackProcessor<>(
                (record, exception) -> {
                    log.warn("failed to process kafka message (retries are exhausted). topic name:" + record.topic()
                            + " value:" + record.value());
                    messageProducer.saveFailedMessage(record, exception);
                }, retryMaxAttempts);
        factory.setAfterRollbackProcessor(afterRollbackProcessor);
        log.debug("Kafka Receiver Config kafkaListenerContainerFactory created");
        return factory;
    }

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        log.debug("Kafka Receiver Config consumerFactory created");
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new ConcurrentHashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollIntervalMs);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimoutMs);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, offset);
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        log.debug("Kafka Receiver Config consumerConfigs created");
        return props;
    }
}
How can I achieve this?
Set the commitRecovered property to true and inject a KafkaTemplate configured with the same producer factory as the transaction manager.
/**
 * {@inheritDoc}
 * Set to true and the container will run the
 * {@link #process(List, Consumer, Exception, boolean)} method in a transaction and,
 * if a record is skipped and recovered, we will send its offset to the transaction.
 * Requires a {@link KafkaTemplate}.
 * @param commitRecovered true to process in a transaction.
 * @since 2.2.5
 * @see #isProcessInTransaction()
 * @see #process(List, Consumer, Exception, boolean)
 * @see #setKafkaTemplate(KafkaTemplate)
 */
@Override
public void setCommitRecovered(boolean commitRecovered) { // NOSONAR enhanced javadoc
    super.setCommitRecovered(commitRecovered);
}
EDIT
Here's the logic in process...
if (SeekUtils.doSeeks(((List) records), consumer, exception, recoverable,
        getSkipPredicate((List) records, exception), this.logger)
        && isCommitRecovered() && this.kafkaTemplate != null && this.kafkaTemplate.isTransactional()) {
    // if we get here it means retries are exhausted and we've skipped
    ConsumerRecord<K, V> skipped = records.get(0);
    this.kafkaTemplate.sendOffsetsToTransaction(
            Collections.singletonMap(new TopicPartition(skipped.topic(), skipped.partition()),
                    new OffsetAndMetadata(skipped.offset() + 1)));
}
EDIT2
In 2.2.x, the property is
/**
 * Set to true to run the {@link #process(List, Consumer, Exception, boolean)}
 * method in a transaction. Requires a {@link KafkaTemplate}.
 * @param processInTransaction true to process in a transaction.
 * @since 2.2.5
 * @see #process(List, Consumer, Exception, boolean)
 * @see #setKafkaTemplate(KafkaTemplate)
 */
public void setProcessInTransaction(boolean processInTransaction) {
    this.processInTransaction = processInTransaction;
}
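Applied to the factory from the question, the wiring could look like this sketch (the kafkaTemplate is assumed to be built from the same transactional producer factory as the ChainedKafkaTransactionManager; the method names come from the javadoc quoted above):

// Sketch: recover after retryMaxAttempts failures and commit the skipped offset.
DefaultAfterRollbackProcessor<String, String> arp = new DefaultAfterRollbackProcessor<>(
        (record, exception) -> {
            log.warn("retries exhausted. topic:" + record.topic() + " value:" + record.value());
            messageProducer.saveFailedMessage(record, exception);
        }, retryMaxAttempts);
// The template must share the producer factory used by chainedTM so that
// the recovered record's offset can be sent to the transaction.
arp.setKafkaTemplate(kafkaTemplate);
arp.setProcessInTransaction(true); // 2.2.x property; use setCommitRecovered(true) in later versions
factory.setAfterRollbackProcessor(arp);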

Auto-commit of offsets failed & retry also not working as expected

I am using Spring Boot 2.1.9 with Spring Kafka 2.2.9.
I am getting a warning in the log file that says the commit failed. I am also using SeekToCurrentErrorHandler to capture the error once retries are exhausted, but sometimes if the commit fails it keeps iterating.
Here is my config class:
@Configuration
@EnableKafka
public class KafkaReceiverConfig {

    // Kafka server configuration
    @Value("${kafka.servers}")
    private String kafkaServers;

    // Group identifier
    @Value("${kafka.groupId}")
    private String groupId;

    // Kafka max retry attempts
    @Value("${kafka.retry.maxAttempts:5}")
    private Integer retryMaxAttempts;

    // Kafka max retry interval
    @Value("${kafka.retry.interval:180000}")
    private Long retryInterval;

    // Kafka concurrency
    @Value("${kafka.concurrency:10}")
    private Integer concurrency;

    // Kafka poll timeout
    @Value("${kafka.poll.timeout:100}")
    private Integer pollTimeout;

    // Kafka consumer offset
    @Value("${kafka.consumer.auto-offset-reset:earliest}")
    private String offset = "earliest";

    // Logger
    private static final Logger log = LoggerFactory.getLogger(KafkaReceiverConfig.class);

    /**
     * Defines the max number of retry attempts
     *
     * @return the retry policy, see {@link RetryPolicy}
     */
    @Bean
    public RetryPolicy retryPolicy() {
        SimpleRetryPolicy simpleRetryPolicy = new SimpleRetryPolicy();
        simpleRetryPolicy.setMaxAttempts(retryMaxAttempts);
        return simpleRetryPolicy;
    }

    /**
     * Time before the next retry can happen, in milliseconds
     *
     * @return the back-off policy, see {@link BackOffPolicy}
     */
    @Bean
    public BackOffPolicy backOffPolicy() {
        FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
        backOffPolicy.setBackOffPeriod(retryInterval);
        return backOffPolicy;
    }

    /**
     * Retry template
     *
     * @return the retry template, see {@link RetryTemplate}
     */
    @Bean
    public RetryTemplate retryTemplate() {
        RetryTemplate retryTemplate = new RetryTemplate();
        retryTemplate.setRetryPolicy(retryPolicy());
        retryTemplate.setBackOffPolicy(backOffPolicy());
        return retryTemplate;
    }

    /**
     * String Kafka listener container factory
     *
     * @return see {@link KafkaListenerContainerFactory}
     */
    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory(
            ChainedKafkaTransactionManager<String, String> chainedTM, MessageProducer messageProducer) {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<String, String>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(concurrency);
        factory.getContainerProperties().setPollTimeout(pollTimeout);
        factory.getContainerProperties().setSyncCommits(true);
        factory.setRetryTemplate(retryTemplate());
        factory.getContainerProperties().setAckOnError(false);
        factory.getContainerProperties().setTransactionManager(chainedTM);
        factory.setStatefulRetry(true);
        // NOTE: retryMaxAttempts should always be +1 due to a spring-kafka bug
        SeekToCurrentErrorHandler errorHandler = new SeekToCurrentErrorHandler((record, exception) -> {
            log.warn("failed to process kafka message (retries are exhausted). topic name:" + record.topic()
                    + " value:" + record.value());
            messageProducer.saveFailedMessage(record, exception);
        }, retryMaxAttempts + 1);
        factory.setErrorHandler(errorHandler);
        log.debug("Kafka Receiver Config kafkaListenerContainerFactory created");
        return factory;
    }

    /**
     * String consumer factory
     *
     * @return see {@link ConsumerFactory}
     */
    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        log.debug("Kafka Receiver Config consumerFactory created");
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    /**
     * Consumer configurations
     *
     * @return see {@link Map}
     */
    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new ConcurrentHashMap<String, Object>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServers);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // Disable the auto commit if required for testing
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, offset);
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        log.debug("Kafka Receiver Config consumerConfigs created");
        return props;
    }
}
Here is the log:
2019-10-30 15:48:05.907 WARN [xxxxx-component-workflow-starter,,,] 11 --- [nt_create-2-C-1] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer-4, groupId=fulfillment_create] Synchronous auto-commit of offsets {fulfillment_create-4=OffsetAndMetadata{offset=32, metadata=''}} failed: Commit cannot be completed since the group has already rebalanced and assigned the partitions to another member. This means that the time between subsequent calls to poll() was longer than the configured max.poll.interval.ms, which typically implies that the poll loop is spending too much time message processing. You can address this either by increasing the session timeout or by reducing the maximum size of batches returned in poll() with max.poll.records.
Is there any problem with my config file?
How do I set max poll, session timeout, and the rest? (Please give an example.)
How do I set up SeekToCurrentErrorHandler in Spring Kafka 2.2.9 so that it works well? (I cannot upgrade Spring Kafka due to other dependencies.)
You are taking too long to process the records returned by the poll().
You need to reduce max.poll.records (ConsumerConfig.MAX_POLL_RECORDS_CONFIG) and/or increase max.poll.interval.ms.
You can't perform a seek after this error - you have lost the partitions.
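For example, in the consumerConfigs() map above the two properties could be set explicitly (the values are illustrative starting points, not recommendations; tune them to your actual per-record processing time):

// Fewer records per poll means each batch finishes well within the interval...
props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 10);
// ...and a larger interval gives the listener more headroom before a rebalance is triggered.
props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 600000);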

How to write an integration test for a RabbitTemplate with confirm and return callbacks

I have a publisher that publishes messages to an exchange, and the associated RabbitTemplate is configured with confirm and return callbacks. I would like to know how to write an integration test for this class, using mocks or any other framework.
Code:
@Configuration
public class TopicConfiguration {

    private static final Logger LOGGER = LoggerFactory.getLogger(TopicConfiguration.class);

    @Autowired
    RabbitMqConfig rabbitMqConfig;

    @Autowired
    EPPQ2Publisher eppQ2Publisher;

    /**
     * Caching connection factory
     * @return CachingConnectionFactory
     */
    @Bean
    public CachingConnectionFactory cachingConnectionFactory() {
        CachingConnectionFactory cachingConnectionFactory = new CachingConnectionFactory(rabbitMqConfig.getPublisherHosts(),
                rabbitMqConfig.getPublisherPort());
        cachingConnectionFactory.setUsername(rabbitMqConfig.getPublisherUsername());
        cachingConnectionFactory.setPassword(rabbitMqConfig.getPublisherPassword());
        cachingConnectionFactory.setVirtualHost("hydra.services");
        cachingConnectionFactory.createConnection();
        cachingConnectionFactory.setPublisherReturns(true);
        cachingConnectionFactory.setPublisherConfirms(true);
        cachingConnectionFactory.setConnectionNameStrategy(f -> "publisherConnection");
        return cachingConnectionFactory;
    }

    /**
     * Bean RabbitTemplate
     * @return RabbitTemplate
     */
    @Bean
    public RabbitTemplate template(
            @Qualifier("cachingConnectionFactory") CachingConnectionFactory cachingConnectionFactory) {
        final RabbitTemplate rabbitTemplate = new RabbitTemplate(cachingConnectionFactory);
        rabbitTemplate.setMessageConverter(producerJackson2MessageConverter());
        RetryTemplate retryTemplate = new RetryTemplate();
        ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
        backOffPolicy.setInitialInterval(500);
        backOffPolicy.setMultiplier(10.0);
        backOffPolicy.setMaxInterval(10000);
        retryTemplate.setBackOffPolicy(backOffPolicy);
        rabbitTemplate.setRetryTemplate(retryTemplate);
        rabbitTemplate.setExchange(rabbitMqConfig.getPublisherTopic());
        rabbitTemplate.setUsePublisherConnection(true);
        rabbitTemplate.setMandatory(true);
        rabbitTemplate.setConfirmCallback((correlation, ack, reason) -> {
            if (correlation != null) {
                LOGGER.info("Received " + (ack ? " ack " : " nack ") + "for correlation: " + correlation);
                if (ack) {
                    // Confirmation received: ack the message on Q1 using correlation.getId().
                    eppQ2Publisher.ackMessage(Long.valueOf(correlation.getId()));
                } else {
                    // No confirmation received; nothing to do here for the retry.
                }
            }
        });
        rabbitTemplate.setReturnCallback((message, replyCode, replyText, exchange, routingKey) -> {
            LOGGER.error("Returned: " + message + "\nreplyCode: " + replyCode + "\nreplyText: " + replyText
                    + "\nexchange/rk: " + exchange + "/" + routingKey);
        });
        return rabbitTemplate;
    }

    /**
     * Bean Jackson2JsonMessageConverter
     * @return Jackson2JsonMessageConverter
     */
    @Bean
    public Jackson2JsonMessageConverter producerJackson2MessageConverter() {
        return new Jackson2JsonMessageConverter();
    }
}
@Component
public class EPPQ2PublisherImpl implements EPPQ2Publisher {

    @Autowired
    RabbitMqConfig rabbitMqConfig;

    @Autowired
    private RabbitTemplate rabbitTemplate;

    private Channel channel;

    /**
     * Sends an individual scrubbed and encrypted message to the publisher queue (Q2).
     * @param msg - message domain object
     * @param deliveryTag - the message delivery tag
     */
    @Override
    public void sendMessage(Message msg, Long deliveryTag) {
        rabbitTemplate.convertAndSend(rabbitMqConfig.getRoutingKey(), msg, new CorrelationData(deliveryTag.toString()));
    }

    /**
     * Sends a list of scrubbed and encrypted messages to the publisher queue (Q2).
     * @param msgList - list of scrubbed and encrypted messages
     * @param channel - AMQP client channel
     * @param deliveryTagList - list of incoming message delivery tags
     */
    @Override
    public void sendMessages(List<Message> msgList, Channel channel, List<Long> deliveryTagList) {
        if (this.channel == null) {
            this.channel = channel;
        }
        for (int i = 0; i < msgList.size(); i++) {
            sendMessage(msgList.get(i), deliveryTagList.get(i));
        }
    }

    /**
     * Sends an acknowledgement to the subscriber queue (Q1).
     * @param deliveryTag - deliveryTag of each individual message
     */
    @Override
    public void ackMessage(Long deliveryTag) {
        try {
            channel.basicAck(deliveryTag, false);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
Could someone point me to a reference or example? Some JUnit and integration tests for a RabbitTemplate with confirms and returns would be a great help.
There are lots of test cases in the framework itself: see RabbitTemplatePublisherCallbacksIntegrationTests, RabbitTemplatePublisherCallbacksIntegrationTests2, and RabbitTemplatePublisherCallbacksIntegrationTests3.
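As a rough starting point, a confirm callback can be verified with a latch; a minimal sketch, assuming a broker on localhost and an existing, routable exchange/routing key ("some.exchange" and "some.key" are placeholders):

@Test
public void publisherConfirmIsReceived() throws Exception {
    CachingConnectionFactory cf = new CachingConnectionFactory("localhost");
    cf.setPublisherConfirms(true); // enable confirms on the connection

    RabbitTemplate template = new RabbitTemplate(cf);
    CountDownLatch confirmLatch = new CountDownLatch(1);
    template.setConfirmCallback((correlation, ack, cause) -> {
        if (ack) {
            confirmLatch.countDown();
        }
    });

    template.convertAndSend("some.exchange", "some.key", "test-payload",
            new CorrelationData("test-1"));

    // Confirms arrive asynchronously, so wait for the callback before asserting.
    assertTrue(confirmLatch.await(10, TimeUnit.SECONDS));
    cf.destroy();
}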

Custom MessageConverter with Spring JmsMessagingTemplate is not working as I expected

I'm trying to attach a custom message converter that implements org.springframework.jms.support.converter.MessageConverter, to a JmsMessagingTemplate.
I've read somewhere that we can attach the message converter to a MessagingMessageConverter by calling setPayloadConverter, and then attach that messaging message converter to the JmsMessagingTemplate via setJmsMessageConverter. After that, I call convertAndSend, but I notice that it doesn't convert the payload.
When I debugged the code, I noticed that setting the JMS message converter doesn't set the converter instance variable in the JmsMessagingTemplate. So when the convertAndSend method calls doConvert and tries to getConverter, it gets the default simple message converter and not my custom one.
My question is, can I use an implementation of org.springframework.jms.support.converter.MessageConverter with a JmsMessagingTemplate? Or do I need to use an implementation of org.springframework.messaging.converter.MessageConverter?
I'm using Spring Boot 1.4.1.RELEASE, and Spring 4.3.3.RELEASE. The code is below.
Configuration
@Configuration
@EnableJms
public class MessagingEncryptionPocConfig {

    /**
     * Listener ActiveMQ connection factory
     */
    @Bean(name = "listenerActiveMqConnectionFactory")
    public ActiveMQConnectionFactory listenerActiveMqConnectionFactory() {
        return new ActiveMQConnectionFactory("admin", "admin", "tcp://localhost:61616");
    }

    /**
     * Producer ActiveMQ connection factory
     */
    @Bean(name = "producerActiveMqConnectionFactory")
    public ActiveMQConnectionFactory producerActiveMqConnectionFactory() {
        return new ActiveMQConnectionFactory("admin", "admin", "tcp://localhost:61616");
    }

    /**
     * Caching connection factory
     */
    @Bean
    public CachingConnectionFactory cachingConnectionFactory(
            @Qualifier("producerActiveMqConnectionFactory") ActiveMQConnectionFactory activeMqConnectionFactory) {
        return new CachingConnectionFactory(activeMqConnectionFactory);
    }

    /**
     * JMS listener container factory
     */
    @Bean
    public DefaultJmsListenerContainerFactory jmsListenerContainerFactory(
            @Qualifier("listenerActiveMqConnectionFactory") ActiveMQConnectionFactory connectionFactory,
            MessagingMessageConverter messageConverter) {
        DefaultJmsListenerContainerFactory defaultJmsListenerContainerFactory = new DefaultJmsListenerContainerFactory();
        defaultJmsListenerContainerFactory.setConnectionFactory(connectionFactory);
        defaultJmsListenerContainerFactory.setMessageConverter(messageConverter);
        return defaultJmsListenerContainerFactory;
    }

    /**
     * JMS queue template
     */
    @Bean(name = "queueTemplate")
    public JmsMessagingTemplate queueTemplate(CachingConnectionFactory cachingConnectionFactory,
            MessageConverter messagingMessageConverter) {
        JmsMessagingTemplate queueTemplate = new JmsMessagingTemplate(cachingConnectionFactory);
        queueTemplate.setJmsMessageConverter(messagingMessageConverter);
        return queueTemplate;
    }

    @Bean
    public MessageConverter encryptionDecryptionMessagingConverter(Jaxb2Marshaller jaxb2Marshaller) {
        MessageConverter encryptionDecryptionMessagingConverter = new EncryptionDecryptionMessagingConverter(jaxb2Marshaller);
        MessagingMessageConverter messageConverter = new MessagingMessageConverter();
        messageConverter.setPayloadConverter(encryptionDecryptionMessagingConverter);
        return messageConverter;
    }

    /**
     * Jaxb marshaller
     */
    @Bean(name = "producerJaxb2Marshaller")
    public Jaxb2Marshaller jaxb2Marshaller() {
        Jaxb2Marshaller jaxb2Marshaller = new Jaxb2Marshaller();
        jaxb2Marshaller.setPackagesToScan("com.schema");
        return jaxb2Marshaller;
    }
}
MessageProducer Class
@Component
public class MessageProducer {

    private static final Logger LOG = LoggerFactory.getLogger(MessageProducer.class);

    @Autowired
    @Qualifier("queueTemplate")
    private JmsMessagingTemplate queueTemplate;

    public void publishMsg(Transaction trx, Map<String, Object> jmsHeaders, MessagePostProcessor postProcessor) {
        LOG.info("Sending Message. Payload={} Headers={}", trx, jmsHeaders);
        queueTemplate.convertAndSend("queue.source", trx, jmsHeaders, postProcessor);
    }
}
Unit Test
@RunWith(SpringRunner.class)
@SpringBootTest
@ActiveProfiles("test")
public class WebsMessagingEncryptionPocApplicationTests {

    @Autowired
    private MessageProducer producer;

    @Autowired
    private MessageListener messageListener;

    /**
     * Ensure that a message is sent, and received.
     */
    @Test
    public void testProducer() throws Exception {
        // ARRANGE
        CountDownLatch latch = new CountDownLatch(1);
        messageListener.setCountDownLatch(latch);
        Transaction trx = new Transaction();
        trx.setCustomerAccountID(new BigInteger("111111"));
        Map<String, Object> jmsHeaders = new HashMap<String, Object>();
        jmsHeaders.put("tid", "1234563423");
        MessagePostProcessor encryptPostProcessor = new EncryptMessagePostProcessor();
        // ACT
        producer.publishMsg(trx, jmsHeaders, encryptPostProcessor);
        latch.await();
        // ASSERT - assertion done in the consumer
    }
}
The converter field is used to convert your input params to a spring-messaging Message<?>.
The JMS converter is used later (in MessagingMessageCreator) to then create a JMS Message from the messaging Message<?>.
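So the custom payload converter does run, just in the second stage; a sketch of where each setter applies (EncryptionDecryptionMessagingConverter and jaxb2Marshaller are the beans from the question):

JmsMessagingTemplate template = new JmsMessagingTemplate(cachingConnectionFactory);

// Stage 1: the 'converter' field (a spring-messaging MessageConverter) turns the
// convertAndSend() payload into a Message<?>; setJmsMessageConverter does not touch it.

// Stage 2: the JMS converter turns that Message<?> into a javax.jms.Message;
// a payload converter attached to a MessagingMessageConverter runs here.
MessagingMessageConverter jmsConverter = new MessagingMessageConverter();
jmsConverter.setPayloadConverter(new EncryptionDecryptionMessagingConverter(jaxb2Marshaller));
template.setJmsMessageConverter(jmsConverter);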
