Bitronix - JMS and JDBC - Message is dequeued on Exception - spring-boot

I'm trying to integrate the Bitronix Transaction Manager into my Spring Boot project to manage JDBC and JMS transactions together. I have two databases and one ActiveMQ broker for JMS. I've managed to enlist both databases in the same transaction, but when I try to include JMS it doesn't seem to work.
This is my Bitronix Transaction Manager configuration:
@Configuration
@EnableTransactionManagement
public class BitronixJtaConfiguration {

    private static final Logger log = LoggerFactory.getLogger(BitronixJtaConfiguration.class);

    @Value("${bitronix.tm.serverId}")
    private String serverId;
    @Value("${bitronix.tm.journal.disk.logPart1Filename:}")
    private String logPart1Filename;
    @Value("${bitronix.tm.journal.disk.logPart2Filename:}")
    private String logPart2Filename;

    @Bean
    public bitronix.tm.Configuration transactionManagerServices() {
        bitronix.tm.Configuration configuration = TransactionManagerServices.getConfiguration();
        configuration.setServerId(serverId);
        if ("".equals(logPart1Filename) && "".equals(logPart2Filename)) {
            configuration.setJournal(null);
            log.info("Disable journal for testing.");
        } else {
            configuration.setLogPart1Filename(logPart1Filename);
            configuration.setLogPart2Filename(logPart2Filename);
        }
        return configuration;
    }

    @Bean
    public TransactionManager transactionManager() {
        return TransactionManagerServices.getTransactionManager();
    }

    @Bean
    public UserTransaction userTransaction() {
        return TransactionManagerServices.getTransactionManager();
    }

    @Bean
    public PlatformTransactionManager platformTransactionManager() {
        UserTransaction userTransaction = userTransaction();
        TransactionManager transactionManager = transactionManager();
        return new JtaTransactionManager(userTransaction, transactionManager);
    }
}
This is one of my database configuration classes:
@Configuration
@EnableTransactionManagement
public class TransportationPlanDBConfig {

    private static final Logger LOGGER = LoggerFactory.getLogger("ppalfile");

    @Value("${tp.jdbc.driverClassName}")
    private String driverClassName;
    @Value("${tp.jdbc.username}")
    private String username;
    @Value("${tp.jdbc.url}")
    private String url;
    @Value("${tp.jdbc.password}")
    private String password;
    @Value("${tp.c3p0.max_size}")
    private int c3p0MaxSize;
    @Value("${tp.c3p0.min_size}")
    private int c3p0MinSize;
    @Value("${tp.c3p0.unreturned_connection_timeout}")
    private int c3p0UnreturnedConnectionTimeout;
    @Value("${tp.c3p0.acquire_increment}")
    private int c3p0AcquireIncrement;
    @Value("${tp.c3p0.max_idle_time}")
    private int c3p0MaxIdleTime;

    public TransportationPlanDBConfig() {
        // Empty constructor
    }

    @Bean(name = "tpds", destroyMethod = "close")
    public DataSource dataSource() {
        LOGGER.debug("Creating Transportation plan DS");
        PoolingDataSource poolingDataSource = new PoolingDataSource();
        poolingDataSource.setClassName(driverClassName);
        poolingDataSource.setUniqueName("tpds");
        Properties props = new Properties();
        props.put("url", url);
        props.put("user", username);
        props.put("password", password);
        poolingDataSource.setDriverProperties(props);
        poolingDataSource.setAllowLocalTransactions(true);
        poolingDataSource.setMaxPoolSize(c3p0MaxSize);
        poolingDataSource.init();
        return poolingDataSource;
    }

    @Bean(name = "tpJdbcTemplate")
    JdbcTemplate jdbcTemplate(@Qualifier("tpds") DataSource dataSource) {
        LOGGER.debug("Creating JdbcTemplate transport plan");
        JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource);
        LOGGER.debug("JdbcTemplate Transport Plan created");
        return jdbcTemplate;
    }
}
My ActiveMQ configuration class:
@Configuration
@EnableTransactionManagement
public class ActivesMQsConfiguration {

    @Bean
    public ConnectionFactory jmsConnectionFactoryLocal() {
        PoolingConnectionFactory btmPoolingConnectionFactory = new PoolingConnectionFactory();
        btmPoolingConnectionFactory.setClassName("org.apache.activemq.ActiveMQXAConnectionFactory");
        btmPoolingConnectionFactory.setUniqueName("AMQLocal");
        btmPoolingConnectionFactory.setMinPoolSize(1);
        btmPoolingConnectionFactory.setMaxPoolSize(5);
        btmPoolingConnectionFactory.setAllowLocalTransactions(true);
        btmPoolingConnectionFactory.setUser("admin");
        btmPoolingConnectionFactory.setPassword("admin");
        btmPoolingConnectionFactory.getDriverProperties().setProperty("brokerURL", "tcp://localhost:61616");
        btmPoolingConnectionFactory.init();
        return btmPoolingConnectionFactory;
    }

    @Bean
    public JmsListenerContainerFactory<?> jmsListenerContainerFactoryLocal(
            @Qualifier("jmsConnectionFactoryLocal") ConnectionFactory connectionFactory,
            DefaultJmsListenerContainerFactoryConfigurer configurer) {
        DefaultJmsListenerContainerFactory factory = new DefaultJmsListenerContainerFactory();
        factory.setConnectionFactory(connectionFactory);
        factory.setSessionTransacted(true);
        configurer.configure(factory, connectionFactory);
        return factory;
    }
}
My JMS listener implementation:
@Component
@Transactional
public class ContactTransactionReceiver {

    private int mensajesConsumer2 = 0;

    @Autowired
    @Qualifier("versionJdbcTemplate")
    private JdbcTemplate versionJdbcTemplate;

    @Autowired
    @Qualifier("tpJdbcTemplate")
    private JdbcTemplate tpjdbcTemplate;

    @Autowired
    private VersionsConfDao versionsConfDao;

    @Autowired
    private TrainDao trainDao;

    @Transactional(rollbackFor = Exception.class)
    @JmsListener(destination = "Consumer.consumer2.VirtualTopic.TopicPrueba")
    public void receiveMessageFromContacts2(Message message) throws Exception {
        mensajesConsumer2++;
        TextMessage txtMessage = (TextMessage) message;
        System.out.println("Segundo consumer:" + txtMessage.getText() + " recibidos:" + mensajesConsumer2);
        VersionsConf versionsconf = new VersionsConf("V" + mensajesConsumer2, "V" + mensajesConsumer2, false, new Timestamp(1L), 1);
        VersionsConf versionsResult = versionsConfDao.insertUpdate(versionJdbcTemplate, versionsconf);
        if (mensajesConsumer2 == 2) {
            throw new Exception();
        }
        Train train = new Train("101" + mensajesConsumer2, 1L, 2L, false, true, "atp");
        Train trainResult = trainDao.insertUpdate(tpjdbcTemplate, train);
        if (mensajesConsumer2 == 3) {
            throw new Exception();
        }
    }
}
Based on my listener implementation, this is how I understand Bitronix should behave:
On the first incoming message: insert one row into each database and dequeue the message. -> This works fine.
On the second and third incoming messages: insert no rows because of the exception and keep the message in the queue. -> No rows are inserted, but the message is still dequeued.
Moreover, I'd like to add that it logs the following during execution:
[main] bitronix.tm.recovery.Recoverer: recovery committed 0 dangling transaction(s) and rolled back 0 aborted transaction(s) on 4 resource(s) [AMQLocal, vds, AMQRemote, tpds]
So I understand that both brokers and both databases are registered. But when the listener processes the second message (which throws an exception), it logs:
WARN 5740 [Session Task-1] bitronix.tm.twopc.Preparer : executing transaction with 0 enlisted resource
Any idea what the problem is?
You can find the full code on: https://github.com/PedroRamirezTOR/spring-jms-jdbc-integration.git
Thanks!

First, the recovery committed 0 dangling transaction(s) and rolled back 0 aborted transaction(s) on 4 resource(s) message will appear every now and then; it is perfectly normal. You can ignore it as long as the committed and rolled back counters stay at zero.
The executing transaction with 0 enlisted resource log looks like the real deal.
I strongly suspect a problem with your Spring setup. I'm no Spring expert by any means, but the DefaultJmsListenerContainerFactory should have a reference to your Spring PlatformTransactionManager instance so that it knows it has to work transactionally, so you should call factory.setTransactionManager(PlatformTransactionManager).
This should at least move you to the next step.
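For reference, here is a minimal sketch of what that change could look like in the jmsListenerContainerFactoryLocal bean from the question, assuming the JtaTransactionManager exposed as platformTransactionManager in BitronixJtaConfiguration is injected:
@Bean
public JmsListenerContainerFactory<?> jmsListenerContainerFactoryLocal(
        @Qualifier("jmsConnectionFactoryLocal") ConnectionFactory connectionFactory,
        DefaultJmsListenerContainerFactoryConfigurer configurer,
        PlatformTransactionManager platformTransactionManager) {
    DefaultJmsListenerContainerFactory factory = new DefaultJmsListenerContainerFactory();
    factory.setConnectionFactory(connectionFactory);
    // Hand the JTA transaction manager to the listener container so that message
    // consumption and the XA resources are enlisted in one transaction; a rollback
    // should then leave the message on the queue for redelivery.
    factory.setTransactionManager(platformTransactionManager);
    factory.setSessionTransacted(true);
    configurer.configure(factory, connectionFactory);
    return factory;
}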

Related

How to explicit execute begin (commit, rollback) transaction with PlatformTransactionManager?

I'm using Spring's PlatformTransactionManager with jTDS and Sybase ASE 16.
I cannot use autocommit because it would SET CHAINED ON, and the database I'm working with has triggers with unchained procedures.
So my connection has SET CHAINED OFF (AutoCommit=false).
In this scenario, Sybase ASE requires an explicit begin transaction, commit transaction, or rollback transaction; otherwise it completely ignores the transaction.
I'm looking for a way to tell the PlatformTransactionManager to execute begin transaction, commit transaction, or rollback transaction. Is that possible?
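In the meantime, a minimal sketch of programmatic transaction demarcation with Spring (assuming the sybaseTM DataSourceTransactionManager and sybaseJdbcTempl JdbcTemplate beans from the configuration added below are injected as transactionManager and jdbcTemplate; the SQL is a hypothetical placeholder). Note that Spring drives the boundaries through Connection.commit()/rollback(), so whether jTDS turns those into the explicit commit transaction / rollback transaction statements Sybase expects in unchained mode is something to verify, not something this sketch guarantees:
// Option 1: TransactionTemplate around the work
TransactionTemplate txTemplate = new TransactionTemplate(transactionManager);
txTemplate.execute(status -> {
    jdbcTemplate.update("UPDATE my_table SET col = ? WHERE id = ?", "value", 1); // hypothetical statement
    return null; // any exception thrown here marks the transaction for rollback
});

// Option 2: fully manual begin/commit/rollback
TransactionStatus status = transactionManager.getTransaction(new DefaultTransactionDefinition());
try {
    jdbcTemplate.update("UPDATE my_table SET col = ? WHERE id = ?", "value", 1); // hypothetical statement
    transactionManager.commit(status);
} catch (RuntimeException e) {
    transactionManager.rollback(status);
    throw e;
}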
Update -> My configuration:
spring.sybase.url=jdbc:jtds:sybase://hostname:6000/db;autoCommit=false;appName=integration
spring.sybase.username=myuser
spring.sybase.password=mypass
spring.sybase.driver-class-name=net.sourceforge.jtds.jdbc.Driver
spring.sybase.hikari.pool-name=datasource-pool
spring.sybase.hikari.connection-timeout=3000
spring.sybase.hikari.maximum-pool-size=5
spring.sybase.hikari.minimum-idle=0
spring.sybase.hikari.transaction-isolation=TRANSACTION_READ_COMMITTED
spring.sybase.hikari.auto-commit=false
spring.sybase.hikari.connection-test-query=SELECT 1
Bean Configuration
@Configuration
@EnableTransactionManagement
public class SybaseDataSourceConfig {

    public static final String SYBASE_JDBC_TEMPL = "sybaseJdbcTempl";
    public static final String SYBASE_DS = "sybaseDs";
    public static final String SYBASE_DS_PROPERTIES = "sybaseDsProp";
    public static final String SYBASE_TRAN_MANAGER = "sybaseTM";

    @Bean(name = SYBASE_DS_PROPERTIES)
    @ConfigurationProperties("spring.sybase")
    public DataSourceProperties sybaseDataSourceProperties() {
        return new DataSourceProperties();
    }

    @Bean(name = SYBASE_DS)
    @ConfigurationProperties("spring.sybase.hikari")
    public HikariDataSource sybaseDataSource(@Qualifier(SYBASE_DS_PROPERTIES) final DataSourceProperties sybaseDataSourceProperties) {
        return sybaseDataSourceProperties.initializeDataSourceBuilder().type(HikariDataSource.class).build();
    }

    @Bean(name = SYBASE_JDBC_TEMPL)
    public JdbcTemplate sybaseJdbcTemplate(@Qualifier(SYBASE_DS) final HikariDataSource sybaseDataSource) {
        return new JdbcTemplate(sybaseDataSource);
    }

    @Bean(name = SYBASE_TRAN_MANAGER)
    public PlatformTransactionManager transactionManager(@Qualifier(SYBASE_DS) final DataSource sybaseDataSource) {
        DataSourceTransactionManager dataSourceTransactionManager = new DataSourceTransactionManager();
        dataSourceTransactionManager.setDataSource(sybaseDataSource);
        return dataSourceTransactionManager;
    }
}

Explicitly Start Kafka Consumer In Main After method runs

I have a Spring Boot service that consumes from a Kafka topic. When I consume, I perform certain tasks on the Kafka message. Before I can perform these operations I need to wait for the service to load some data into caches that I have set up. My issue is that if I let the Kafka consumer auto-start, it begins consuming before the cache loads and errors out.
I am trying to explicitly start the consumer after I load the cache, but I get a NullPointerException.
@Configuration
public class KafkaConfig {

    @Value("${kafka.server}")
    String server;
    @Value("${kafka.port}")
    String port;
    @Value("${kafka.group.id}")
    String groupid;

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> config = new HashMap<>();
        config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, server + ":" + port);
        config.put(ConsumerConfig.GROUP_ID_CONFIG, groupid);
        config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // config.put("security.protocol", "SASL_PLAINTEXT");
        // config.put("sasl.kerberos.service.name", "kafka");
        return new DefaultKafkaConsumerFactory<>(config);
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setAutoStartup(false);
        return factory;
    }
}
KafkaListener
@Service
public class KafkaConsumer {

    @Autowired
    AggregationService aggregationService;

    @Autowired
    private KafkaListenerEndpointRegistry registry;

    private final CounterService counterService;

    public KafkaConsumer(CounterService counterService) {
        this.counterService = counterService;
    }

    @KafkaListener(topics = "gliTransactionTopic", group = "gliDecoupling", id = "gliKafkaListener")
    public boolean consume(String message,
            @Header(KafkaHeaders.RECEIVED_PARTITION_ID) Integer partition,
            @Header(KafkaHeaders.OFFSET) Long offset) throws ParseException {
        System.out.println("Inside kafka listener :" + message + " partition :" + partition.toString() + " offset :" + offset.toString());
        aggregationService.run();
        return true;
    }
}
Service to start/stop the listener:
@Service
public class DecouplingController {

    @Autowired
    private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry;

    public void stop() {
        MessageListenerContainer listenerContainer = kafkaListenerEndpointRegistry
                .getListenerContainer("gliKafkaListener");
        listenerContainer.stop();
    }

    public void start() {
        MessageListenerContainer listenerContainer = kafkaListenerEndpointRegistry
                .getListenerContainer("gliKafkaListener");
        listenerContainer.start();
    }
}
main method
@SpringBootApplication
public class DecouplingApplication {

    Ignite ignite;
    static IgniteCache<Long, MappingsEntity> mappingsCache;

    public static void main(String[] args) {
        SpringApplication.run(DecouplingApplication.class, args);
        Ignition.setClientMode(true);
        Ignite ignite = Ignition.ignite("ignite");
        loadCaches(ignite);
    }

    public static boolean loadCaches(Ignite ignite) {
        mappingsCache = ignite.getOrCreateCache("MappingsCache");
        mappingsCache.loadCache(null);
        System.out.println("Data Loaded");
        DecouplingController dc = new DecouplingController();
        dc.start();
        return true;
    }
}
Below is the exception
Data Loaded
Exception in thread "main" java.lang.NullPointerException
at com.ignite.spring.decoupling.controller.DecouplingController.start(DecouplingController.java:126)
at com.ignite.spring.decoupling.DecouplingApplication.loadCaches(DecouplingApplication.java:64)
at com.ignite.spring.decoupling.DecouplingApplication.main(DecouplingApplication.java:37)
Instead of manually creating an object of DecouplingController, autowire the dependency in DecouplingApplication.
@Autowired
DecouplingController deDecouplingController;
The ApplicationContext, which handles autowired dependencies, is not aware of objects you create manually with "new". The autowired kafkaListenerEndpointRegistry is unknown to the new DecouplingController object you created.
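A minimal sketch of the main method along those lines, taking the controller from the ApplicationContext returned by SpringApplication.run instead of instantiating it with new (and with the dc.start() call removed from loadCaches):
public static void main(String[] args) {
    ConfigurableApplicationContext ctx = SpringApplication.run(DecouplingApplication.class, args);
    Ignition.setClientMode(true);
    Ignite ignite = Ignition.ignite("ignite");
    loadCaches(ignite);
    // Fetch the Spring-managed bean, whose KafkaListenerEndpointRegistry is autowired,
    // and only then start the listener container.
    DecouplingController dc = ctx.getBean(DecouplingController.class);
    dc.start();
}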
It also seems like gliKafkaListener1 was not registered in some part of the ConsumerConfig/ListenerConfig.

springboot jta transaction rollback is not working

I want to perform a transaction across databases using a JTA (Atomikos) configuration.
I have the code below, which I want to run in a single transaction. However, when I run the application it saves eventObject1 and updates eventObject2, and it does not roll back when an exception is thrown by the l.intValue() statement. Below is all the code and configuration I am using for JTA.
Am I missing anything? Could anyone please help?
public void testJTATRansaction() {
    service1.saveEvent1(eventObject1);
    service2.updateEvent2(eventObject2);
}
saveEvent1 method in service1:
@Transactional(propagation = Propagation.REQUIRED, rollbackFor = Exception.class)
public int saveEvent1(Object eventObject1) {
    return repository1.save(eventObject1);
}
updateEvent2 method in service2:
@Transactional(propagation = Propagation.REQUIRED, rollbackFor = Exception.class)
public int updateEvent2(Object eventObject2) {
    int i = l.intValue(); // l is null, to force an exception
    return repository2.updateEvent2(eventObject2);
}
I am using the default save method from repository1 (the JpaRepository save method).
The updateEvent2 method in the repository2 class:
@Modifying
@Transactional(propagation = Propagation.REQUIRED, rollbackFor = Exception.class)
@Query(UPDATE_EVENTS)
public int updateEvent2(
        @Param(value = "eventObject2") Object eventObject2);
I am using a Spring Boot application class to initialise my application:
@SpringBootApplication
@ComponentScan("com.cbc.event")
@EnableTransactionManagement
public class RatingDaemonApplication {

    public static void main(String[] args) throws Exception {
        SpringApplication.run(RatingDaemonApplication.class);
    }
}
I have the JTA configuration below:
@Configuration
@ComponentScan
@EnableTransactionManagement
public class JTATransactionConfig {

    @Bean
    public JpaVendorAdapter jpaVendorAdapter() {
        HibernateJpaVendorAdapter hibernateJpaVendorAdapter = new HibernateJpaVendorAdapter();
        hibernateJpaVendorAdapter.setShowSql(true);
        hibernateJpaVendorAdapter.setGenerateDdl(true);
        hibernateJpaVendorAdapter.setDatabase(Database.MYSQL);
        return hibernateJpaVendorAdapter;
    }

    @Bean(name = "userTransaction")
    public UserTransaction userTransaction() throws Throwable {
        UserTransactionImp userTransactionImp = new UserTransactionImp();
        userTransactionImp.setTransactionTimeout(10000);
        return userTransactionImp;
    }

    @Bean(name = "atomikosTransactionManager", initMethod = "init", destroyMethod = "close")
    public TransactionManager atomikosTransactionManager() throws Throwable {
        UserTransactionManager userTransactionManager = new UserTransactionManager();
        userTransactionManager.setForceShutdown(false);
        AtomikosJtaPlatform.transactionManager = userTransactionManager;
        return userTransactionManager;
    }

    @Bean(name = "transactionManager")
    @DependsOn({ "userTransaction", "atomikosTransactionManager" })
    public PlatformTransactionManager transactionManager() throws Throwable {
        UserTransaction userTransaction = userTransaction();
        AtomikosJtaPlatform.transaction = userTransaction;
        TransactionManager atomikosTransactionManager = atomikosTransactionManager();
        return new JtaTransactionManager(userTransaction, atomikosTransactionManager);
    }
}
and the datasource configuration is:
@Configuration
@DependsOn("transactionManager")
@PropertySource({"classpath:application.properties"})
@EnableJpaRepositories(basePackages = {"com.cbc.repository"},
        transactionManagerRef = "transactionManager", entityManagerFactoryRef = "entityMF")
public class dataSourceConfiguration {

    @Autowired
    Environment env;

    @Autowired
    JpaVendorAdapter jpaVendorAdapter;

    public DataSource eventsDS() {
        AtomikosDataSourceBean xaDS = new AtomikosDataSourceBean();
        xaDS.setXaDataSourceClassName(env.getProperty(DRIVER_CLASS_NAME));
        xaDS.setXaDataSource(getMysqlXADataSource());
        xaDS.setUniqueResourceName("DS");
        xaDS.setMaxPoolSize(3);
        return xaDS;
    }

    private MysqlXADataSource getMysqlXADataSource() {
        MysqlXADataSource ds = new MysqlXADataSource();
        ds.setPinGlobalTxToPhysicalConnection(true);
        ds.setURL(env.getProperty(URL));
        ds.setUser(env.getProperty(USER));
        ds.setPassword(env.getProperty(PASSWORD));
        return ds;
    }

    @Bean(name = "entityMF")
    public LocalContainerEntityManagerFactoryBean importedEventsEntityMF() {
        Map<String, Object> properties = new HashMap<>();
        properties.put("hibernate.transaction.jta.platform", AtomikosJtaPlatform.class.getName());
        properties.put("javax.persistence.transactionType", "JTA");
        LocalContainerEntityManagerFactoryBean entityManager = new LocalContainerEntityManagerFactoryBean();
        entityManager.setJtaDataSource(eventsDS());
        entityManager.setJpaVendorAdapter(jpaVendorAdapter);
        entityManager.setPackagesToScan("com.cbc.events");
        entityManager.setPersistenceUnitName("persistenceUnit");
        entityManager.setJpaPropertyMap(properties);
        return entityManager;
    }
}
I have the AtomikosJtaPlatform class below:
public class AtomikosJtaPlatform extends AbstractJtaPlatform {

    private static final long serialVersionUID = 1L;

    static TransactionManager transactionManager;
    static UserTransaction transaction;

    @Override
    protected TransactionManager locateTransactionManager() {
        return transactionManager;
    }

    @Override
    protected UserTransaction locateUserTransaction() {
        return transaction;
    }
}
This is from the Spring documentation:
When the propagation setting is PROPAGATION_REQUIRED, a logical transaction scope is created for each method upon which the setting is applied. Each such logical transaction scope can determine rollback-only status individually, with an outer transaction scope being logically independent from the inner transaction scope. Of course, in case of standard PROPAGATION_REQUIRED behavior, all these scopes will be mapped to the same physical transaction. So a rollback-only marker set in the inner transaction scope does affect the outer transaction's chance to actually commit (as you would expect it to).
However, in the case where an inner transaction scope sets the rollback-only marker, the outer transaction has not decided on the rollback itself, and so the rollback (silently triggered by the inner transaction scope) is unexpected. A corresponding UnexpectedRollbackException is thrown at that point. This is expected behavior so that the caller of a transaction can never be misled to assume that a commit was performed when it really was not. So if an inner transaction (of which the outer caller is not aware) silently marks a transaction as rollback-only, the outer caller still calls commit. The outer caller needs to receive an UnexpectedRollbackException to indicate clearly that a rollback was performed instead.
Transaction propagation
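To illustrate the passage above, here is a minimal sketch (class and method names are hypothetical, not from the question) of an inner REQUIRED scope marking the shared transaction rollback-only, which the outer caller then sees as an UnexpectedRollbackException:
@Service
public class OuterService {

    @Autowired
    private InnerService innerService;

    @Transactional // outer logical scope, same physical transaction
    public void outerMethod() {
        try {
            innerService.innerMethod();
        } catch (RuntimeException e) {
            // swallowed here, but the inner scope has already marked the
            // shared physical transaction rollback-only
        }
        // on return, the commit attempt fails with UnexpectedRollbackException
    }
}

@Service
public class InnerService {

    @Transactional(propagation = Propagation.REQUIRED)
    public void innerMethod() {
        throw new RuntimeException("force rollback-only");
    }
}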
Try changing the method declarations as below and give it a go:
public int saveEvent1(Object eventObject1) throws UnexpectedRollbackException
public int updateEvent2(Object eventObject2) throws UnexpectedRollbackException
To avoid such things, it's a good idea to have a separate method in one of those service classes (or in a completely different service class) and call both repository operations in one go, with the transaction annotation on that method.
Also, when the service methods are annotated with the transaction annotation you don't need to annotate your repository methods; the more transaction-related annotations you have, the more complex it is to resolve issues.
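A minimal sketch of that suggestion, using a hypothetical wrapper service (the class and method names are illustrative, not from the question): both repository calls happen inside one transactional method, so an exception in either rolls back both.
@Service
public class EventTransactionService {

    @Autowired
    private Repository1 repository1;
    @Autowired
    private Repository2 repository2;

    // Single transaction boundary around both operations; if either call throws,
    // the JTA transaction manager rolls back the work on both datasources.
    @Transactional(rollbackFor = Exception.class)
    public void saveAndUpdate(Object eventObject1, Object eventObject2) {
        repository1.save(eventObject1);
        repository2.updateEvent2(eventObject2);
    }
}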
Using an H2 datasource, the distributed transaction succeeds, but with a MySQL datasource it fails.
(1) First I suspected that Atomikos does not support MysqlXADataSource well.
(2) Then I thought that JPA and Hibernate do not support JTA that well.
So I tried plain JDBC:
@Configuration
public class ArticleConfigure {

    @ConfigurationProperties("second.datasource")
    @Bean(name = "articleDataSourceProperties")
    public DataSourceProperties secondDataSourceProperties() {
        return new DataSourceProperties();
    }

    //@Bean(name = "articleDataSource")
    @Bean(name = "articleDataSource")
    public DataSource articleDataSource() {
        MysqlXADataSource mdatasource = new MysqlXADataSource();
        mdatasource.setUrl(secondDataSourceProperties().getUrl());
        mdatasource.setUser(secondDataSourceProperties().getUsername());
        mdatasource.setPassword(secondDataSourceProperties().getPassword());
        /*JdbcDataSource h2XaDataSource = new JdbcDataSource();
        h2XaDataSource.setURL(secondDataSourceProperties().getUrl());*/
        // atomikos datasource configuration
        com.atomikos.jdbc.AtomikosDataSourceBean xaDataSource = new AtomikosDataSourceBean();
        xaDataSource.setXaDataSource(mdatasource);
        xaDataSource.setMaxPoolSize(30);
        xaDataSource.setUniqueResourceName("axds1");
        return xaDataSource;
    }

    @Bean(name = "twojdbcTemplate")
    public JdbcTemplate twojdbcTemplate() {
        return new JdbcTemplate(articleDataSource());
    }
}
TransactionConfig.
@Configuration
@EnableTransactionManagement
@ComponentScan(basePackages = "cn.crazychain")
public class TransactionConfig {

    @Bean(name = "userTransaction")
    public UserTransaction userTransaction() throws Throwable {
        UserTransactionImp userTransactionImp = new UserTransactionImp();
        userTransactionImp.setTransactionTimeout(10000);
        //return new BitronixTransactionManager();
        return userTransactionImp;
    }

    @Bean(name = "atomikosTransactionManager", initMethod = "init", destroyMethod = "close")
    //@Bean(name = "atomikosTransactionManager")
    public TransactionManager atomikosTransactionManager() throws Throwable {
        UserTransactionManager userTransactionManager = new UserTransactionManager();
        userTransactionManager.setForceShutdown(false);
        //return TransactionManagerServices.getTransactionManager();
        return userTransactionManager;
    }

    @Bean(name = "customerJtaTransactionManager")
    @DependsOn({ "userTransaction", "atomikosTransactionManager" })
    public PlatformTransactionManager transactionManager() throws Throwable {
        UserTransaction userTransaction = userTransaction();
        TransactionManager atomikosTransactionManager = atomikosTransactionManager();
        return new JtaTransactionManager(userTransaction, atomikosTransactionManager);
    }
}
Perhaps there is a bug in Hibernate JPA with XA.
The conclusion is that JTA works fine with plain JDBC and AtomikosDataSourceBean.
The original open-source reference project is https://github.com/fabiomaffioletti/mul-at.git
My complete source code is at
https://github.com/lxiaodao/crazychain.
The JTA transaction manager will only work if you use JNDI. The JTA transaction manager listens to the DataSource and brings it under a transaction only if the datasource bean lives in the Java/web container, not in the application container.
Either use JNDI so that JTA works, or switch to the JPA transaction manager.
The JTA transaction manager is mainly used for distributed transactions and is prone to transaction rollback failures.

Spring 4 with JPA (Hibernate implementation): Transaction not working (no transaction in progress)

OK, this looks like a repeated question; however, I have been searching for over two days with no success. Below are the configuration details:
Annotated App Config class
@Configuration
@ComponentScan(basePackages = "com.test")
@EnableTransactionManagement(mode = AdviceMode.PROXY, proxyTargetClass = true)
public class AnnotatedAppConfig {

    private static final Logger _logger = Logger.getLogger(AnnotatedAppConfig.class);

    @Bean
    public DataSource dataSource() {
        // C3P0 datasource configuration
        final ComboPooledDataSource dataSource = new ComboPooledDataSource();
        try {
            dataSource.setDriverClass(ReaderUtil.getInstance().getProperty(IConst.DB_DRIVER));
        } catch (PropertyVetoException e) {
            _logger.error("Error setting driver class ", e);
        }
        dataSource.setUser(ReaderUtil.getInstance().getProperty(IConst.DB_USER));
        dataSource.setPassword(ReaderUtil.getInstance().getProperty(IConst.DB_PASSWORD));
        dataSource.setJdbcUrl(ReaderUtil.getInstance().getProperty(IConst.DB_URL));
        _logger.info("Datasource created successfully");
        return dataSource;
    }

    @Bean
    public LocalContainerEntityManagerFactoryBean entityManagerFactory() {
        final LocalContainerEntityManagerFactoryBean entityManagerFactoryBean = new LocalContainerEntityManagerFactoryBean();
        entityManagerFactoryBean.setDataSource(dataSource());
        entityManagerFactoryBean.setPersistenceUnitName("testunit");
        entityManagerFactoryBean.setJpaVendorAdapter(createJPAVendorAdapter());
        _logger.info("EntityManagerFactory created successfully");
        return entityManagerFactoryBean;
    }

    @Bean
    public PlatformTransactionManager txManager() {
        final JpaTransactionManager transactionManager = new JpaTransactionManager();
        transactionManager.setEntityManagerFactory(entityManagerFactory().getObject());
        transactionManager.setDataSource(dataSource());
        _logger.info("Transaction Manager created successfully");
        return transactionManager;
    }

    private HibernateJpaVendorAdapter createJPAVendorAdapter() {
        final HibernateJpaVendorAdapter jpaVendorAdapter = new HibernateJpaVendorAdapter();
        jpaVendorAdapter.setShowSql(true);
        jpaVendorAdapter.setGenerateDdl(false);
        jpaVendorAdapter.setDatabase(Database.MYSQL);
        jpaVendorAdapter.setDatabasePlatform(ReaderUtil.getInstance().getProperty(IConst.HIBERNATE_DB_DIALECT));
        return jpaVendorAdapter;
    }
}
Annotated Service Class
@Service
@Transactional(value = "txManager")
public class TestServiceImpl extends BaseServiceImpl implements ITestService {

    @Autowired
    private ITestDAO testDAO;

    @Override
    @Transactional(propagation = Propagation.REQUIRES_NEW, readOnly = false, value = "txManager")
    public Long register(final String username, final String password,
            final String name, final String address, final Integer deptId) {
        return testDAO.register(username, password, name, address, deptId);
    }
}
Whenever I try to invoke the register method, the following error is thrown (in DEBUG mode):
TransactionAspectSupport.completeTransactionAfterThrowing(534) | Completing transaction for [com.test.service.TestServiceImpl.register] after exception: javax.persistence.TransactionRequiredException: no transaction is in progress
07 Jul 2015 18:59:36,488 230371 [http-bio-9080-exec-5]:DEBUG - RuleBasedTransactionAttribute.rollbackOn(131) | Applying rules to determine whether transaction should rollback on javax.persistence.TransactionRequiredException: no transaction is in progress
I have tried everything I could find on the net, but no luck.
Any experts, please help me figure out what is missing in the configuration.
PLEASE NOTE: Autowiring is working fine, and read transactions (SELECT queries) are working fine.
I read somewhere that the @Service and @Transactional annotations do not work if applied together on the same class (which is happening in my case; TestServiceImpl has both annotations). Is this the case? What would be the workaround in such a situation?

@Transactional on Spring shutdown to properly shutdown Hsqldb

The heart of this question is: is it possible to execute a transaction from a method triggered by a Spring shutdown hook?
At the moment I have a HyperSqlDbServer class that implements SmartLifecycle, as found in this question:
In a spring bean is it possible to have a shutdown method which can use transactions?
I have a method in that class, marked transactional, that gets invoked as part of the stop method:
@Transactional
public void executeShutdown() {
    hsqlDBShutdownService.executeShutdownQuery();
    hsqlDBShutdownService.closeEntityManager();
}
The service used in that method is a bit of a hack that I had to use because I could not autowire the EntityManager into this class:
@Service
public class HsqlDBShutdownService {

    @PersistenceContext
    private EntityManager entityManager;

    @Autowired
    private HyperSqlDbServer hyperSqlDbServer;

    @Transactional
    public void executeShutdownQuery() {
        entityManager.createNativeQuery("SHUTDOWN").executeUpdate();
    }

    @Transactional
    public void closeEntityManager() {
        entityManager.close();
    }

    @PostConstruct
    public void setHsqlDBShutdownService() {
        hyperSqlDbServer.setShutdownService(this);
    }
}
You may notice that all I'm really trying to accomplish is invoking the query SHUTDOWN before stopping the server. Without this, the HSQLDB lock file sticks around on server restart, and the server throws an exception.
The code above produces the following exception:
javax.persistence.TransactionRequiredException: Executing an update/delete query
at org.hibernate.ejb.AbstractQueryImpl.executeUpdate(AbstractQueryImpl.java:96)
...
So my original question stands, but if anyone has a thought on how I could execute this query another way, I'll try that as well.
FYI, I've also tried the @PreDestroy annotation, but I get the same TransactionRequiredException.
Edit: For completeness, I am using the JpaTransactionManager, and the @Transactional annotations work throughout my project, except on shutdown...
Edit 2: Datasource and transaction manager configuration:
@Configuration
@EnableTransactionManagement
@PropertySource("classpath:persistence.properties")
public class PersistenceConfig implements TransactionManagementConfigurer {

    private static final String PASSWORD_PROPERTY = "dataSource.password";
    private static final String USERNAME_PROPERTY = "dataSource.username";
    private static final String URL_PROPERTY = "dataSource.url";
    private static final String DRIVER_CLASS_NAME_PROPERTY = "dataSource.driverClassName";

    @Autowired
    private Environment env;

    @Bean
    @DependsOn("hsqlDb")
    public DataSource configureDataSource() {
        DriverManagerDataSource dataSource = new DriverManagerDataSource();
        dataSource.setDriverClassName(env.getProperty(DRIVER_CLASS_NAME_PROPERTY));
        dataSource.setUrl(env.getProperty(URL_PROPERTY));
        dataSource.setUsername(env.getProperty(USERNAME_PROPERTY));
        dataSource.setPassword(env.getProperty(PASSWORD_PROPERTY));
        return dataSource;
    }

    @Bean
    @DependsOn("hsqlDb")
    public LocalContainerEntityManagerFactoryBean configureEntityManagerFactory() {
        LocalContainerEntityManagerFactoryBean entityManagerFactoryBean = new LocalContainerEntityManagerFactoryBean();
        entityManagerFactoryBean.setDataSource(configureDataSource());
        entityManagerFactoryBean.setPackagesToScan("com.mycompany.model.db");
        entityManagerFactoryBean.setJpaVendorAdapter(new HibernateJpaVendorAdapter());
        Properties jpaProperties = new Properties();
        jpaProperties.put(org.hibernate.cfg.Environment.DIALECT, env.getProperty(org.hibernate.cfg.Environment.DIALECT));
        jpaProperties.put(org.hibernate.cfg.Environment.HBM2DDL_AUTO, env.getProperty(org.hibernate.cfg.Environment.HBM2DDL_AUTO));
        jpaProperties.put(org.hibernate.cfg.Environment.SHOW_SQL, env.getProperty(org.hibernate.cfg.Environment.SHOW_SQL));
        jpaProperties.put(org.hibernate.cfg.Environment.HBM2DDL_IMPORT_FILES_SQL_EXTRACTOR, env.getProperty(org.hibernate.cfg.Environment.HBM2DDL_IMPORT_FILES_SQL_EXTRACTOR));
        jpaProperties.put(org.hibernate.cfg.Environment.HBM2DDL_IMPORT_FILES, env.getProperty(org.hibernate.cfg.Environment.HBM2DDL_IMPORT_FILES));
        entityManagerFactoryBean.setJpaProperties(jpaProperties);
        return entityManagerFactoryBean;
    }

    @Override
    @Bean
    @DependsOn("hsqlDb")
    public PlatformTransactionManager annotationDrivenTransactionManager() {
        return new JpaTransactionManager();
    }
}
I found a workaround for shutting down the HSQLDB database, but it involves avoiding Spring's EntityManager and @Transactional, as they apparently do not work during server shutdown. My modified HsqlDBShutdownService is below. The key change is that instead of using the EntityManager to invoke the query, I obtain a JDBC connection manually and invoke the query that way, which avoids the requirement for @Transactional:
@Service
public class HsqlDBShutdownService {

    @Autowired
    private ApplicationContext applicationContext;

    @PersistenceContext
    private EntityManager entityManager;

    @Autowired
    private HyperSqlDbServer hyperSqlDbServer;

    public void executeShutdownQuery() {
        Connection conn = null;
        try {
            JdbcTemplate jdbcTemplate = new JdbcTemplate(this.applicationContext.getBean(DataSource.class));
            conn = DataSourceUtils.getConnection(jdbcTemplate.getDataSource());
            conn.setAutoCommit(true);
            jdbcTemplate.execute("SHUTDOWN");
        } catch (Exception ex) {
            ex.printStackTrace();
        } finally {
            try {
                if (conn != null)
                    conn.close();
            } catch (Exception ex) {
                ex.printStackTrace();
            }
        }
    }

    @Transactional
    public void closeEntityManager() {
        entityManager.close();
    }

    @PostConstruct
    public void setHsqlDBShutdownService() {
        hyperSqlDbServer.setShutdownService(this);
    }
}
The server can now restart successfully without leaving Hsqldb lock files around.
