activeMq redeliveryPolicy ignored when passed from tomee to spring - spring

i have a simple spring app deployed in tomee 1.7.1 with activeMq 5.10.
My issue is that the redelivery policy I set seems to be ignored, mainly the delay on redelivery.
In my JMS listener I immediately throw an exception to test the automatic retries.
my activemq.xml is this:
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:amq="http://activemq.apache.org/schema/core"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="
http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans.xsd
http://activemq.apache.org/schema/core
http://activemq.apache.org/schema/core/activemq-core.xsd
">
<bean id="propertyConfigurer" class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer" />
<!--
The <broker> element is used to configure the ActiveMQ broker.
-->
<amq:broker xmlns="http://activemq.apache.org/schema/core" useShutdownHook="false" enableStatistics="true" brokerName="bima" useJmx="true" tmpDataDirectory="${catalina.base}/activemq-data/localhost/tmp_storage" populateJMSXUserID="true" useAuthenticatedPrincipalForJMSXUserID="true" schedulerSupport="true">
<amq:connectionFactory id="jmsRedeliverConnectionFactory" brokerURL="vm://localhost">
<amq:redeliveryPolicy>
<amq:redeliveryPolicy maximumRedeliveries="5" initialRedeliveryDelay="1000" redeliveryUseExponentialBackOff ="true" backOffMultiplier="5" />
</amq:redeliveryPolicy>
<amq:persistenceAdapter>
<amq:kahaDB directory="${catalina.base}/activemq-data/kahadb" checkForCorruptJournalFiles="true" checksumJournalFiles="true" journalMaxFileLength="32mb"/>
</amq:persistenceAdapter>
<managementContext>
<managementContext createConnector="false"/>
</managementContext>
</amq:broker>
And my tomee.xml looks like this:
<tomee>
<Resource id="ActiveMQResourceAdapter" type="ActiveMQResourceAdapter">
BrokerXmlConfig=broker:(vm://localhost)
<!-- ServerUrl=vm://localhost -->
</Resource>
<!-- see http://tomee.apache.org/containers-and-resources.html -->
<Resource id="resources/jms/ConnectionFactory" type="javax.jms.ConnectionFactory">
ResourceAdapter = ActiveMQResourceAdapter
BrokerURL = vm://localhost
maximumRedeliveries 3
redeliveryBackOffMultiplier 2
redeliveryUseExponentialBackOff true
initialRedeliveryDelay 5000
<!-- BrokerUrl = tcp://localhost:61616 -->
</Resource>
<Resource id="resources/jms/XAConnectionFactory" class-name="org.apache.activemq.ActiveMQXAConnectionFactory">
BrokerURL = vm://localhost
</Resource>
<Resource id="resources/jms/PrintQueue" type="javax.jms.Queue"/>
<Resource id="testxa" class-name="com.mysql.jdbc.jdbc2.optional.MysqlXADataSource">
Url jdbc:mysql://localhost:3306/test?autoReconnect=true
User root
</Resource>
<Resource id="movieDatabase" type="DataSource">
XaDataSource testxa
DataSourceCreator dbcp
UserName root
</Resource>
<!-- activate next line to be able to deploy applications in apps -->
<Deployments dir="apps" />
</tomee>
and my JMS beans in my application context look like this:
<bean id="StartQueue" class="org.springframework.jms.core.JmsTemplate">
<property name="connectionFactory">
<ref local="jmsFactory" />
</property>
<property name="defaultDestinationName" value="StartQueue" />
<property name="deliveryPersistent" value="true"/>
<property name="explicitQosEnabled" value="true"/>
</bean>
<bean id="starter" class="org.superbiz.mdb.Start"/>
<jms:listener-container container-type="default" connection-factory="jmsFactory" cache="none" acknowledge="auto" transaction-manager="transactionManager" concurrency="1" >
<jms:listener destination="StartQueue" ref="starter" />
</jms:listener-container>
once i added the maximumRedeliveries 3 to the tomee xml that started to work but the redeliveries all happen with no delay. So something is ignoring the useExponentialBackOff and the initialRedeliveryDelay or am i missing setting it somewhere?
EDIT:
So i configured tomee to use an external broker activemq 5.10.0 with the broker config
<!-- START SNIPPET: example -->
<beans
xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd
http://activemq.apache.org/schema/core http://activemq.apache.org/schema/core/activemq-core.xsd">
<bean class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer">
<property name="locations">
<value>file:${activemq.conf}/credentials.properties</value>
</property>
</bean>
<bean id="logQuery" class="org.fusesource.insight.log.log4j.Log4jLogQuery"
lazy-init="false" scope="singleton"
init-method="start" destroy-method="stop">
</bean>
<broker xmlns="http://activemq.apache.org/schema/core" brokerName="localhost" dataDirectory="${activemq.data}" schedulerSupport="true">
<destinationPolicy>
<policyMap>
<policyEntries>
<policyEntry topic=">" >
<pendingMessageLimitStrategy>
<constantPendingMessageLimitStrategy limit="1000"/>
</pendingMessageLimitStrategy>
</policyEntry>
</policyEntries>
</policyMap>
</destinationPolicy>
<plugins>
<redeliveryPlugin fallbackToDeadLetter="true" sendToDlqIfMaxRetriesExceeded="true">
<redeliveryPolicyMap>
<redeliveryPolicyMap>
<defaultEntry>
<redeliveryPolicy maximumRedeliveries="5" initialRedeliveryDelay="1000" redeliveryDelay="1000" useExponentialBackOff ="true" backOffMultiplier="5"/>
</defaultEntry>
</redeliveryPolicyMap>
</redeliveryPolicyMap>
</redeliveryPlugin>
</plugins>
<managementContext>
<managementContext createConnector="false"/>
</managementContext>
<persistenceAdapter>
<kahaDB directory="${activemq.data}/kahadb" checkForCorruptJournalFiles="true" checksumJournalFiles="true" journalMaxFileLength="32mb"/>
</persistenceAdapter>
<systemUsage>
<systemUsage>
<memoryUsage>
<memoryUsage percentOfJvmHeap="70" />
</memoryUsage>
<storeUsage>
<storeUsage limit="100 gb"/>
</storeUsage>
<tempUsage>
<tempUsage limit="50 gb"/>
</tempUsage>
</systemUsage>
</systemUsage>
<transportConnectors>
<transportConnector name="openwire" uri="tcp://0.0.0.0:61617?maximumConnections=1000&wireFormat.maxFrameSize=104857600"/>
<transportConnector name="amqp" uri="amqp://0.0.0.0:5672?maximumConnections=1000&wireFormat.maxFrameSize=104857600"/>
<transportConnector name="stomp" uri="stomp://0.0.0.0:61613?maximumConnections=1000&wireFormat.maxFrameSize=104857600"/>
<transportConnector name="mqtt" uri="mqtt://0.0.0.0:1883?maximumConnections=1000&wireFormat.maxFrameSize=104857600"/>
<transportConnector name="ws" uri="ws://0.0.0.0:61614?maximumConnections=1000&wireFormat.maxFrameSize=104857600"/>
</transportConnectors>
<!-- destroy the spring context on shutdown to stop jetty -->
<shutdownHooks>
<bean xmlns="http://www.springframework.org/schema/beans" class="org.apache.activemq.hooks.SpringContextHook" />
</shutdownHooks>
</broker>
<import resource="jetty.xml"/>
</beans>
<!-- END SNIPPET: example -->
and the tomee xml
<tomee>
<Resource id="ActiveMQResourceAdapter" type="ActiveMQResourceAdapter">
# Do not start the embedded ActiveMQ broker
BrokerXmlConfig =
ServerUrl = tcp://10.81.1.28:61617
</Resource>
<Resource id="resources/jms/ConnectionFactory" type="javax.jms.ConnectionFactory">
ResourceAdapter = ActiveMQResourceAdapter
maximumRedeliveries 1
</Resource>
<Resource id="testxa" class-name="com.mysql.jdbc.jdbc2.optional.MysqlXADataSource">
Url jdbc:mysql://localhost:3306/test?autoReconnect=true
User root
</Resource>
<Resource id="movieDatabase" type="DataSource">
XaDataSource testxa
DataSourceCreator dbcp
UserName root
</Resource>
<!-- activate next line to be able to deploy applications in apps -->
<Deployments dir="apps" />
</tomee>
This works and the queue gets redelivered as expected. When I try to have TomEE start ActiveMQ with the same activemq.xml and a tomee.xml of
<tomee>
<Resource id="ActiveMQResourceAdapter" type="ActiveMQResourceAdapter">
BrokerXmlConfig=broker:(vm://localhost)
BrokerURL = vm://localhost
</Resource>
<Resource id="resources/jms/ConnectionFactory" type="javax.jms.ConnectionFactory">
ResourceAdapter = ActiveMQResourceAdapter
maximumRedeliveries 1
</Resource>
<Resource id="testxa" class-name="com.mysql.jdbc.jdbc2.optional.MysqlXADataSource">
Url jdbc:mysql://localhost:3306/test?autoReconnect=true
User root
</Resource>
<Resource id="movieDatabase" type="DataSource">
XaDataSource testxa
DataSourceCreator dbcp
UserName root
</Resource>
<!-- activate next line to be able to deploy applications in apps -->
<Deployments dir="apps" />
</tomee>
the configurations are ignored and the queue is not redelivered. Does something else need to be done so TomEE uses the redelivery policy?

I think it is redeliveryUseExponentialBackOff and not useExponentialBackOff: http://activemq.apache.org/maven/5.10.0/apidocs/org/apache/activemq/ra/ActiveMQManagedConnectionFactory.html
Basically use setters.

Why not setting it on resource adapter BTW?

By default TomEE deactivates the scheduler service, so maybe this doesn't work with the embedded broker.
Did you try using an external one, or adding the jar needed to support xbean XML configuration of the broker? (http://tomee.apache.org/jms-resources-and-mdb-container.html)

Answering this old question so maybe someone might benefit from this (I came across a lot of similar questions). I've had a similar problem where the RedeliveryPolicy was not applied correctly. By setting the maximumRedeliveries to -1 it was infinitely retrying to process the message, but the delays were ignored. It turned out that redelivery was not working correctly due to the default cacheLevel field on Spring's DefaultMessageListenerContainer.
Setting this level to CACHE_CONSUMER solved the issue!

Related

ActiveMQ authentication on Spring Boot

I can consume and publish a message to queue without authentication in Spring App via ActiveMQ.
server.port=9999
spring.activemq.broker-url=tcp://localhost:61616
spring.activemq.user=admin
spring.activemq.password=admin
spring.activemq.packages.trust-all=true
I deleted the user and password lines and it still works. But I want to get an error when the following lines are deleted, or when the password and username are wrong.
spring.activemq.user=admin
spring.activemq.password=admin
My authentication is true but it doesn't work.
<bean id="securityConstraint" class="org.eclipse.jetty.util.security.Constraint">
<property name="name" value="BASIC" />
<property name="roles" value="user,admin" />
<!-- set authenticate=false to disable login -->
<property name="authenticate" value="true" />
</bean>
<bean id="adminSecurityConstraint" class="org.eclipse.jetty.util.security.Constraint">
<property name="name" value="BASIC" />
<property name="roles" value="admin" />
<!-- set authenticate=false to disable login -->
<property name="authenticate" value="true" />
</bean>
How to get an error when Password or username is entered incorrectly? How can I open username and password for Spring boot? (I'm not asking about ActiveMQ web console authentication)
activemq.xml :
<beans
xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans.xsd
http://activemq.apache.org/schema/core
http://activemq.apache.org/schema/core/activemq-core.xsd">
<!-- Allows us to use system properties as variables in this configuration file -->
<bean class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer">
<property name="locations">
<value>file:${activemq.conf}/credentials.properties</value>
</property>
</bean>
<bean id="logQuery" class="io.fabric8.insight.log.log4j.Log4jLogQuery"
lazy-init="false" scope="singleton"
init-method="start" destroy-method="stop">
</bean>
<broker xmlns="http://activemq.apache.org/schema/core" brokerName="localhost" dataDirectory="${activemq.data}">
<destinationPolicy>
<policyMap>
<policyEntries>
<policyEntry topic=">" >
<!-- The constantPendingMessageLimitStrategy is used to prevent
slow topic consumers to block producers and affect other consumers
by limiting the number of messages that are retained
For more information, see:
http://activemq.apache.org/slow-consumer-handling.html
-->
<pendingMessageLimitStrategy>
<constantPendingMessageLimitStrategy limit="1000"/>
</pendingMessageLimitStrategy>
</policyEntry>
</policyEntries>
</policyMap>
</destinationPolicy>
<!--
The managementContext is used to configure how ActiveMQ is exposed in
JMX. By default, ActiveMQ uses the MBean server that is started by
the JVM. For more information, see:
http://activemq.apache.org/jmx.html
-->
<managementContext>
<managementContext createConnector="false"/>
</managementContext>
<!--
Configure message persistence for the broker. The default persistence
mechanism is the KahaDB store (identified by the kahaDB tag).
For more information, see:
http://activemq.apache.org/persistence.html
-->
<persistenceAdapter>
<kahaDB directory="${activemq.data}/kahadb"/>
</persistenceAdapter>
<!--
The systemUsage controls the maximum amount of space the broker will
use before disabling caching and/or slowing down producers. For more information, see:
http://activemq.apache.org/producer-flow-control.html
-->
<systemUsage>
<systemUsage>
<memoryUsage>
<memoryUsage percentOfJvmHeap="70" />
</memoryUsage>
<storeUsage>
<storeUsage limit="100 gb"/>
</storeUsage>
<tempUsage>
<tempUsage limit="50 gb"/>
</tempUsage>
</systemUsage>
</systemUsage>
<!--
The transport connectors expose ActiveMQ over a given protocol to
clients and other brokers. For more information, see:
http://activemq.apache.org/configuring-transports.html
-->
<transportConnectors>
<!-- DOS protection, limit concurrent connections to 1000 and frame size to 100MB -->
<transportConnector name="openwire" uri="tcp://0.0.0.0:61616?maximumConnections=1000&wireFormat.maxFrameSize=104857600"/>
<transportConnector name="amqp" uri="amqp://0.0.0.0:5672?maximumConnections=1000&wireFormat.maxFrameSize=104857600"/>
<transportConnector name="stomp" uri="stomp://0.0.0.0:61613?maximumConnections=1000&wireFormat.maxFrameSize=104857600"/>
<transportConnector name="mqtt" uri="mqtt://0.0.0.0:1883?maximumConnections=1000&wireFormat.maxFrameSize=104857600"/>
<transportConnector name="ws" uri="ws://0.0.0.0:61614?maximumConnections=1000&wireFormat.maxFrameSize=104857600"/>
</transportConnectors>
<!-- destroy the spring context on shutdown to stop jetty -->
<shutdownHooks>
<bean xmlns="http://www.springframework.org/schema/beans" class="org.apache.activemq.hooks.SpringContextHook" />
</shutdownHooks>
</broker>
<!--
Enable web consoles, REST and Ajax APIs and demos
The web consoles requires by default login, you can disable this in the jetty.xml file
Take a look at ${ACTIVEMQ_HOME}/conf/jetty.xml for more details
-->
<import resource="jetty.xml"/>
The conf/jetty.xml configuration is for the webconsole. The conf/users.properties is generally where user and group information is stored for the standardized messaging protocols (JMS/STOMP/MQTT/AMQP etc) user authentication with the Apache ActiveMQ distribution.
Please share your conf/activemq.xml for confirmation.

ActiveMQ poor performance on large number (tens of milions) of messages

CentOS 7
4 cores, 16G RAM, 500GB SSD
ActiveMQ 5.15.4
OpenJDK Runtime Environment AdoptOpenJDK (build 11.0.10+9)
ActiveMQ architecture
Machines 69 and 68 are built with network brokers. The activemq.xml of 68 and 69 are identical except for the relevant names such as broker names, hostname, IP, etc.
Topics in this architecture:
TOPIC_A_ALL_DELIVERED
TOPIC_A_ALL_RECEIVED
TOPIC_A_NN_DELIVERED, NN=00-23
TOPIC_A_NN_RECEIVED, NN=00-23
Persistence days: Current is 2 days, but the expected target is 7 days. This is how long to wait before expiring the message if a consumer doesn't connect back to retrieve it. I use this config: <timeStampingBrokerPlugin ttlCeiling="172800000" zeroExpirationOverride="172800000"/>.
Persistent store: kahadb, limit size is 200GB
Message format is XML, and each message size is about 1K-3K bytes.
Consumers
In addition to durably subscribing to TOPIC_A_ALL_XXXXXX, each consumer also durably subscribes to other topics based on its requirements, but I have no idea how fast the consumers can consume the data. Neither do I know whether they actually subscribe to all the topics they need. I only know that sometimes some consumers stop receiving data while they debug their code and then connect back.
Producer
The producer is scheduled to run every 30 minutes. Whenever the producer works it only puts data to one single MQ server. The target MQ server depends on the protocol of failover connection.
Every time, the producer will put over 800K XML messages to topics starting with TOPIC_A. The XML contains a tag site_id (00-23) and a tag direction (DELIVERED or RECEIVED), so the producer will put each XML to its relevant topic based on the tags site_id and direction in the XML. The producer meanwhile puts each XML into TOPIC_A_ALL_XXXXX based on the tag direction in each XML.
Based on the above data the average total quantity of message for each day is about 76,800,000.
Symptom
At beginning no messages are in kahadb. The speed for producer to put queue to a single MQ with one connection is up to 600~700 msgs/sec. As the amount of data increases the speed sending the message slows down. It slows to 3 msgs/sec and sometimes gets stuck. Whenever this situation happens the below situation can be observed:
From htop, one activemq process keeps consuming 100% of CPU (sometime up to 200%)
The free memory is normal (4~8GB at least)
Consumers receive nothing from both MQ servers.
The above situations almost happens everyday.
If I just wait there for 4-6 hours the MQ server will come back. The producer can send at 4xx~5xx msgs/sec, and then consumers can receive data.
This cycle keeps everyday. I really have no idea how to improve this situation. Any suggestions?
activemq.xml
<beans
xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd
http://activemq.apache.org/schema/core http://activemq.apache.org/schema/core/activemq-core.xsd">
<!-- Allows us to use system properties as variables in this configuration file -->
<bean class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer" />
<!-- Allows accessing the server log -->
<bean id="logQuery" class="io.fabric8.insight.log.log4j.Log4jLogQuery"
lazy-init="false" scope="singleton"
init-method="start" destroy-method="stop">
</bean>
<!--
The <broker> element is used to configure the ActiveMQ broker.
-->
<broker xmlns="http://activemq.apache.org/schema/core" brokerName="activemq-mdmcs02p-node1" dataDirectory="${activemq.data}" persistent="true" useJmx="true" populateJMSXUserID="true">
<destinations>
<topic physicalName="TOPIC_A.00.Delivered" />
<topic physicalName="TOPIC_A.00.Received" />
<topic physicalName="TOPIC_A.01.Delivered" />
<topic physicalName="TOPIC_A.01.Received" />
<topic physicalName="TOPIC_A.02.Delivered" />
<topic physicalName="TOPIC_A.02.Received" />
<topic physicalName="TOPIC_A.03.Delivered" />
<topic physicalName="TOPIC_A.03.Received" />
<topic physicalName="TOPIC_A.04.Delivered" />
<topic physicalName="TOPIC_A.04.Received" />
<topic physicalName="TOPIC_A.05.Delivered" />
<topic physicalName="TOPIC_A.05.Received" />
<topic physicalName="TOPIC_A.06.Delivered" />
<topic physicalName="TOPIC_A.06.Received" />
<topic physicalName="TOPIC_A.07.Delivered" />
<topic physicalName="TOPIC_A.07.Received" />
<topic physicalName="TOPIC_A.08.Delivered" />
<topic physicalName="TOPIC_A.08.Received" />
<topic physicalName="TOPIC_A.09.Delivered" />
<topic physicalName="TOPIC_A.09.Received" />
<topic physicalName="TOPIC_A.10.Delivered" />
<topic physicalName="TOPIC_A.10.Received" />
<topic physicalName="TOPIC_A.11.Delivered" />
<topic physicalName="TOPIC_A.11.Received" />
<topic physicalName="TOPIC_A.12.Delivered" />
<topic physicalName="TOPIC_A.12.Received" />
<topic physicalName="TOPIC_A.13.Delivered" />
<topic physicalName="TOPIC_A.13.Received" />
<topic physicalName="TOPIC_A.14.Delivered" />
<topic physicalName="TOPIC_A.14.Received" />
<topic physicalName="TOPIC_A.15.Delivered" />
<topic physicalName="TOPIC_A.15.Received" />
<topic physicalName="TOPIC_A.16.Delivered" />
<topic physicalName="TOPIC_A.16.Received" />
<topic physicalName="TOPIC_A.17.Delivered" />
<topic physicalName="TOPIC_A.17.Received" />
<topic physicalName="TOPIC_A.18.Delivered" />
<topic physicalName="TOPIC_A.18.Received" />
<topic physicalName="TOPIC_A.19.Delivered" />
<topic physicalName="TOPIC_A.19.Received" />
<topic physicalName="TOPIC_A.20.Delivered" />
<topic physicalName="TOPIC_A.20.Received" />
<topic physicalName="TOPIC_A.21.Delivered" />
<topic physicalName="TOPIC_A.21.Received" />
<topic physicalName="TOPIC_A.22.Delivered" />
<topic physicalName="TOPIC_A.22.Received" />
<topic physicalName="TOPIC_A.23.Delivered" />
<topic physicalName="TOPIC_A.23.Received" />
<topic physicalName="TOPIC_A_ALL.Delivered" />
<topic physicalName="TOPIC_A_ALL.Received" />
</destinations>
<destinationPolicy>
<policyMap>
<policyEntries>
<policyEntry topic=">" producerFlowControl="false" memoryLimit="4096mb" enableAudit="false" expireMessagesPeriod="60000" >
<!-- The constantPendingMessageLimitStrategy is used to prevent
slow topic consumers to block producers and affect other consumers
by limiting the number of messages that are retained
For more information, see:
http://activemq.apache.org/slow-consumer-handling.html
-->
<deadLetterStrategy>
<sharedDeadLetterStrategy processExpired="false"/>
</deadLetterStrategy>
<pendingMessageLimitStrategy>
<constantPendingMessageLimitStrategy limit="1000"/>
</pendingMessageLimitStrategy>
<networkBridgeFilterFactory>
<conditionalNetworkBridgeFilterFactory replayWhenNoConsumers="true"/>
</networkBridgeFilterFactory>
</policyEntry>
</policyEntries>
</policyMap>
</destinationPolicy>
<!--
<destinationInterceptors>
<virtualDestinationInterceptor>
<virtualDestinations>
<virtualTopic name=">" prefix="TOPIC_A.*" selectorAware="false"/>
</virtualDestinations>
</virtualDestinationInterceptor>
</destinationInterceptors>
-->
<networkConnectors>
<networkConnector uri="static:(tcp://192.168.11.68:61616)" userName="admin" password="XXXXX" dynamicOnly="true" prefetchSize="1" />
</networkConnectors>
<!--
The managementContext is used to configure how ActiveMQ is exposed in
JMX. By default, ActiveMQ uses the MBean server that is started by
the JVM. For more information, see:
http://activemq.apache.org/jmx.html
-->
<managementContext>
<managementContext createConnector="true"/>
</managementContext>
<!--
Configure message persistence for the broker. The default persistence
mechanism is the KahaDB store (identified by the kahaDB tag).
For more information, see:
http://activemq.apache.org/persistence.html
-->
<persistenceAdapter>
<kahaDB directory="/home/activemq/kahadb"
ignoreMissingJournalfiles="true"
checkForCorruptJournalFiles="true"
checksumJournalFiles="true"
enableJournalDiskSyncs="false"
/>
</persistenceAdapter>
<!--
The systemUsage controls the maximum amount of space the broker will
use before disabling caching and/or slowing down producers. For more information, see:
http://activemq.apache.org/producer-flow-control.html
-->
<systemUsage>
<systemUsage>
<memoryUsage>
<memoryUsage percentOfJvmHeap="70" />
</memoryUsage>
<storeUsage>
<storeUsage limit="80 gb"/>
</storeUsage>
<tempUsage>
<tempUsage limit="40 gb"/>
</tempUsage>
</systemUsage>
</systemUsage>
<!--
<sslContext>
<sslContext
keyStore="file:${activemq.base}conf/broker1.ks" keyStorePassword="P#ssw0rd"
/>
</sslContext>
-->
<!--
The transport connectors expose ActiveMQ over a given protocol to
clients and other brokers. For more information, see:
http://activemq.apache.org/configuring-transports.html
-->
<transportConnectors>
<!-- DOS protection, limit concurrent connections to 1000 and frame size to 100MB -->
<transportConnector name="openwire" uri="tcp://0.0.0.0:61616?maximumConnections=1000&wireFormat.maxFrameSize=1048576000"/>
<transportConnector name="nio" uri="nio://0.0.0.0:61617?trace=true"/>
<!-- <transportConnector name="ssl" uri="ssl://0.0.0.0:61618?trace=true&transport.enabledProtocols=TLSv1.2"/> -->
<transportConnector name="amqp" uri="amqp://0.0.0.0:5672?maximumConnections=1000&wireFormat.maxFrameSize=104857600&transport.transformer=jms"/>
<!-- <transportConnector name="amqp+ssl" uri="amqp://0.0.0.0:5673?maximumConnections=1000&wireFormat.maxFrameSize=104857600&transport.enabledProtocols=TLSv1.2"/> -->
<!-- <transportConnector name="stomp" uri="stomp://0.0.0.0:61613?maximumConnections=1000&wireFormat.maxFrameSize=104857600"/> -->
<!-- <transportConnector name="mqtt" uri="mqtt://0.0.0.0:1883?maximumConnections=1000&wireFormat.maxFrameSize=104857600"/> -->
<!-- <transportConnector name="ws" uri="ws://0.0.0.0:61614?maximumConnections=1000&wireFormat.maxFrameSize=104857600"/> -->
</transportConnectors>
<!-- destroy the spring context on shutdown to stop jetty -->
<shutdownHooks>
<bean xmlns="http://www.springframework.org/schema/beans" class="org.apache.activemq.hooks.SpringContextHook" />
</shutdownHooks>
<plugins>
<!-- 86,400,000 ms = 1 day -->
<timeStampingBrokerPlugin ttlCeiling="172800000" zeroExpirationOverride="172800000"/>
<jaasAuthenticationPlugin configuration="activemq" />
<authorizationPlugin>
<map>
<authorizationMap>
<authorizationEntries>
<authorizationEntry queue=">" read="admins" write="admins" admin="admins" />
<authorizationEntry topic=">" read="admins" write="admins" admin="admins" />
<authorizationEntry topic="TOPIC_A.>" read="mdmsusers" write="mdmsusers" />
<authorizationEntry topic="TOPIC_A.Delivered" read="pwsusers" />
<authorizationEntry topic="ActiveMQ.Advisory.>" read="mdmsusers,pwsusers" write="mdmsusers,pwsusers" admin="mdmsusers,pwsusers"/>
</authorizationEntries>
<tempDestinationAuthorizationEntry>
<tempDestinationAuthorizationEntry read="admins" write="admins" admin="admins"/>
</tempDestinationAuthorizationEntry>
</authorizationMap>
</map>
</authorizationPlugin>
</plugins>
</broker>
<!--
Enable web consoles, REST and Ajax APIs and demos
The web consoles requires by default login, you can disable this in the jetty.xml file
Take a look at ${ACTIVEMQ_HOME}/conf/jetty.xml for more details
-->
<import resource="jetty.xml"/>
</beans>
I suspect you are hitting some sort of combination of flow control, fast producer, slow consumer, pending message limit or invalid client registration issue that needs to be sorted out. Perhaps even bug or optimization in that version of ActiveMQ.
Suggested steps:
Upgrade to latest 5.15.x. There are a lot of fixes and it does not make sense to troubleshoot against a build that old.
Enable Advisory Messages for advisoryForFastProducer and advisoryForSlowConsumer on the destinations. This will give you an ActiveMQ.Advisory topic to show when those scenarios occur.
Investigate the clients and connections to make sure they are all properly registered with clientId and subscriptionName to get a durable subscription. Remember: 2 connections cannot share clientId+subscriptionName
Consider moving to Virtual Topics. This is where messages are sent to the topic and consumers read from queues. Much more flexibility and visibility over what is going on with flows. Also-- bonus it makes multi-broker shared subscriptions straight forward.

ActiveMQ broker to connect to Tibco EMS over SSL

Im trying to setup a broker between ActiveMQ and Tibco EMS with SSL Connectivity.
I have copied jms-2.0.jar, tibcrypt.jar, tibjms.jar and slf4j-api-1.7.13.jar under activemq/lib
I have following activemq.xml config
Issue: The broker starts fine. However, it's not able to capture new messages from EMS, nor does it raise any error.
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd
http://activemq.apache.org/schema/core http://activemq.apache.org/schema/core/activemq-core.xsd">
<!-- Allows us to use system properties as variables in this configuration file -->
<bean class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer">
<property name="locations">
<value>file:${activemq.conf}/credentials.properties</value>
</property>
</bean>
<!-- Allows accessing the server log -->
<bean id="logQuery" class="io.fabric8.insight.log.log4j.Log4jLogQuery"
lazy-init="false" scope="singleton"
init-method="start" destroy-method="stop">
</bean>
<!-- JMS ConnectionFactory to use for local bridging -->
<bean id="tibco" class="com.tibco.tibjms.TibjmsQueueConnectionFactory">
<property name="serverUrl" value="ssl://10.88.66.225:7243" />
<property name="userName" value="admin" />
<property name="userPassword" value="admin123" />
<property name="SSLIdentity" value="/home/activemq/activemq/conf/client_identity.p12" />
<property name="SSLAuthOnly" value="true" />
</bean>
<!--
The <broker> element is used to configure the ActiveMQ broker.
-->
<broker xmlns="http://activemq.apache.org/schema/core" brokerName="localhost" dataDirectory="${activemq.data}">
<destinationPolicy>
<policyMap>
<policyEntries>
<policyEntry topic=">" >
<!-- The constantPendingMessageLimitStrategy is used to prevent
slow topic consumers to block producers and affect other consumers
by limiting the number of messages that are retained
For more information, see:
http://activemq.apache.org/slow-consumer-handling.html
-->
<pendingMessageLimitStrategy>
<constantPendingMessageLimitStrategy limit="1000"/>
</pendingMessageLimitStrategy>
</policyEntry>
</policyEntries>
</policyMap>
</destinationPolicy>
<!--
The managementContext is used to configure how ActiveMQ is exposed in
JMX. By default, ActiveMQ uses the MBean server that is started by
the JVM. For more information, see:
http://activemq.apache.org/jmx.html
-->
<managementContext>
<managementContext createConnector="false"/>
</managementContext>
<!--
Configure message persistence for the broker. The default persistence
mechanism is the KahaDB store (identified by the kahaDB tag).
For more information, see:
http://activemq.apache.org/persistence.html
-->
<persistenceAdapter>
<kahaDB directory="${activemq.data}/kahadb"/>
</persistenceAdapter>
<!--
The systemUsage controls the maximum amount of space the broker will
use before disabling caching and/or slowing down producers. For more information, see:
http://activemq.apache.org/producer-flow-control.html
-->
<systemUsage>
<systemUsage>
<memoryUsage>
<memoryUsage percentOfJvmHeap="70" />
</memoryUsage>
<storeUsage>
<storeUsage limit="100 gb"/>
</storeUsage>
<tempUsage>
<tempUsage limit="50 gb"/>
</tempUsage>
</systemUsage>
</systemUsage>
<!--
The transport connectors expose ActiveMQ over a given protocol to
clients and other brokers. For more information, see:
http://activemq.apache.org/configuring-transports.html
-->
<transportConnectors>
<!-- DOS protection, limit concurrent connections to 1000 and frame size to 100MB -->
<transportConnector name="openwire" uri="tcp://0.0.0.0:61616?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
<transportConnector name="amqp" uri="amqp://0.0.0.0:5672?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
<transportConnector name="stomp" uri="stomp://0.0.0.0:61613?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
<transportConnector name="mqtt" uri="mqtt://0.0.0.0:1883?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
<transportConnector name="ws" uri="ws://0.0.0.0:61614?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600"/>
</transportConnectors>
<!-- destroy the spring context on shutdown to stop jetty -->
<shutdownHooks>
<bean xmlns="http://www.springframework.org/schema/beans"
class="org.apache.activemq.hooks.SpringContextHook" />
</shutdownHooks>
<!-- Dependencies: tibjms.jar must be in the activemq lib directory -->
<!-- bridging definitions for traffic to/from remote activemq instance -->
<jmsBridgeConnectors>
<jmsQueueConnector outboundQueueConnectionFactory="#tibco">
<inboundQueueBridges>
<inboundQueueBridge
inboundQueueName = "queue.Sample"
localQueueName = "queue.incomingMsgs.Sample"/>
</inboundQueueBridges>
<outboundQueueBridges>
<outboundQueueBridge
outboundQueueName = "queue.activemqtoems"
localQueueName = "queue.incomingMsgs.Sample"/>
</outboundQueueBridges>
</jmsQueueConnector>
</jmsBridgeConnectors>
</broker>
<!--
Enable web consoles, REST and Ajax APIs and demos
The web consoles requires by default login, you can disable this in the jetty.xml file
Take a look at ${ACTIVEMQ_HOME}/conf/jetty.xml for more details
-->
<import resource="jetty.xml"/>
</beans>
<!-- END SNIPPET: example -->
I strongly recommend using a bridge instead of wiring up broker-to-broker JMS with ActiveMQ. The broker-to-broker approach is complicated by error handling scenarios, which a bridge can be better customized to handle for your use case.

ActiveMQ Persistence Adapter

I have a network of brokers which works correctly with durable subscribers. They're on "store and forward" with "kahaDB persistence".
I have an available distributed database with its own JDBC Driver.
I want to know if it's worth creating a JDBC Adapter, for the broker to store messages on this shared database to complete this distributed network.
Is it possible? If yes, is it worth it?
My activemq.xml:
<beans
xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd
http://activemq.apache.org/schema/core http://activemq.apache.org/schema/core/activemq-core.xsd">
<!-- Allows us to use system properties as variables in this configuration file -->
<bean class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer">
<property name="locations">
<value>file:${activemq.conf}/credentials.properties</value>
</property>
</bean>
<!-- Allows log searching in hawtio console -->
<bean id="logQuery" class="org.fusesource.insight.log.log4j.Log4jLogQuery"
lazy-init="false" scope="singleton"
init-method="start" destroy-method="stop">
</bean>
<!-- We change the factory finder to instantiate the correct RedCurrant classes, which are compatible -->
<bean id="FactoryDefinition" class="org.springframework.beans.factory.config.MethodInvokingFactoryBean">
<property name="targetClass">
<value>org.apache.activemq.util.FactoryFinder</value>
</property>
<property name="targetMethod">
<value>setObjectFactory</value>
</property>
<property name="arguments">
<list>
<bean id="ObjectFactory" class="io.redcurrant.activemq.util.RedCurrantObjectFactory"/>
</list>
</property>
</bean>
<!--
The <broker> element is used to configure the ActiveMQ broker.
-->
<broker xmlns="http://activemq.apache.org/schema/core" brokerName="RC1" dataDirectory="${activemq.data}" persistent="true">
<destinationPolicy>
<policyMap>
<policyEntries>
<policyEntry topic=">" >
<pendingMessageLimitStrategy>
<constantPendingMessageLimitStrategy limit="1000"/>
</pendingMessageLimitStrategy>
</policyEntry>
</policyEntries>
</policyMap>
</destinationPolicy>
<managementContext>
<managementContext createConnector="false"/>
</managementContext>
<persistenceAdapter>
<kahaDB directory="/DATA/TestNS/RC1/activemq-1"
concurrentStoreAndDispatchTopics="false"
enableJournalDiskSyncs="true"/>
</persistenceAdapter>
<systemUsage>
<systemUsage>
<memoryUsage>
<memoryUsage percentOfJvmHeap="70" />
</memoryUsage>
<storeUsage>
<storeUsage limit="100 gb"/>
</storeUsage>
<tempUsage>
<tempUsage limit="50 gb"/>
</tempUsage>
</systemUsage>
</systemUsage>
<transportConnectors>
<transportConnector name="openwire" uri="tcp://0.0.0.0:30000?maximumConnections=1000&amp;wireFormat.maxFrameSize=104857600" discoveryUri="redcurrant://default:30000"/>
</transportConnectors>
<networkConnectors>
<networkConnector uri="redcurrant://default"/>
</networkConnectors>
<shutdownHooks>
<bean xmlns="http://www.springframework.org/schema/beans" class="org.apache.activemq.hooks.SpringContextHook" />
</shutdownHooks>
</broker>
<import resource="jetty.xml"/>
</beans>
I work on the integration of ActiveMQ with a distributed system named "RedCurrant", so I created a custom DiscoveryAgent and made some changes in a jar which I include in the ActiveMQ/lib directory.
What I really want to build is a network of brokers with persistence. I currently use KahaDB for this, but is it possible for the brokers to share a database so that messages of a topic are shared more quickly? Should I create a JDBC Adapter (SQLite)?
JDBC/database is not the most performant way to persist messages in ActiveMQ.
I don't see the point in using a database anyway, since JDBC is used to share data between a master and a slave, and not between members in a network of brokers (NoB).
If you want short circuit distribution of messages inside a NoB using a database, you really miss the point of NoB. The brokers in the network should be able to independently handle which destinations to propagate and which to handle locally etc.
If you want the messages to propagate through the distributed database instead of using the brokers as simple connectors, you have some major work in front of you.

Infinispan and JGroups discovery on EC2

I'm trying to use my application on AWS EC2 on some Linux boxes with Tomcat servers. Previously I used my application with Infinispan on LAN and I used UDP multicasting for JGroups member discovery. EC2 does not support UDP multicasting and this is the default node discovery approach used by Infinispan to detect nodes running in a cluster. I looked into using the S3_PING protocol, but I have not figured out why it doesn't work.
Does anyone have any ideas what the problem might be here?
Here are my configuration files:
1. applicationContext-cache.xml
<!-- Infinispan cache -->
<cache:annotation-driven/>
<import resource="classpath:/applicationContext-dao.xml"/>
<bean id="cacheManager" class="org.infinispan.spring.provider.SpringEmbeddedCacheManagerFactoryBean">
<property name="configurationFileLocation" value="classpath:/infinispan-replication.xml"/>
</bean>
<context:component-scan base-package="com.alex.cache"/>
2.infinispan-replication.xml
<infinispan xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="urn:infinispan:config:5.1 http://www.infinispan.org/schemas/infinispan-config-5.1.xsd"
xmlns="urn:infinispan:config:5.1">
<global>
<transport transportClass="org.infinispan.remoting.transport.jgroups.JGroupsTransport">
<properties>
<property name="configurationFile" value="/home/akasiyanik/dev/projects/myapp/myapp-configs/jgroups.xml"/>
</properties>
</transport>
</global>
<default>
<!-- Configure a synchronous replication cache -->
<clustering mode="replication">
<sync/>
<hash numOwners="2"/>
</clustering>
</default>
</infinispan>
3. jgroups.xml
<config>
<TCP bind_port="${jgroups.tcp.port:7800}"
loopback="true"
port_range="30"
recv_buf_size="20000000"
send_buf_size="640000"
discard_incompatible_packets="true"
max_bundle_size="64000"
max_bundle_timeout="30"
enable_bundling="true"
use_send_queues="true"
sock_conn_timeout="300"
enable_diagnostics="false"
thread_pool.enabled="true"
thread_pool.min_threads="2"
thread_pool.max_threads="30"
thread_pool.keep_alive_time="60000"
thread_pool.queue_enabled="false"
thread_pool.queue_max_size="100"
thread_pool.rejection_policy="Discard"
oob_thread_pool.enabled="true"
oob_thread_pool.min_threads="2"
oob_thread_pool.max_threads="30"
oob_thread_pool.keep_alive_time="60000"
oob_thread_pool.queue_enabled="false"
oob_thread_pool.queue_max_size="100"
oob_thread_pool.rejection_policy="Discard"
/>
<S3_PING location="r********s" access_key="AK***************SIA"
secret_access_key="y*************************************BJ" timeout="2000" num_initial_members="2"/>
<MERGE2 max_interval="30000"
min_interval="10000"/>
<FD_SOCK/>
<FD timeout="3000" max_tries="3"/>
<VERIFY_SUSPECT timeout="1500"/>
<BARRIER />
<pbcast.NAKACK use_mcast_xmit="false"
exponential_backoff="500"
discard_delivered_msgs="true"/>
<UNICAST />
<pbcast.STABLE stability_delay="1000" desired_avg_gossip="50000"
max_bytes="4M"/>
<pbcast.GMS print_local_addr="true" join_timeout="3000"
view_bundling="true"/>
<UFC max_credits="2M"
min_threshold="0.4"/>
<MFC max_credits="2M"
min_threshold="0.4"/>
<FRAG2 frag_size="60K" />
<pbcast.STATE_TRANSFER/>
</config>
Use this: https://github.com/meltmedia/jgroups-aws
It is an implementation of JGroups discovery protocol for AWS using AWS API (multicast replacement)

Resources