Use of resource-manager-specific transaction demarcation API in EJB 3.x (ejb-3.0)

According to section 13.3.3 of the EJB 3.0 specification: "While an instance is in a transaction, the instance must not attempt to use the resource-manager specific transaction demarcation API (e.g. it must not invoke the commit or rollback method on the java.sql.Connection interface or on the javax.jms.Session interface)."
I tried an example: in a bean-managed transaction I called java.sql.Connection.commit(). I created a stateless bean in NetBeans as a Java EE 5 project, deployed it on GlassFish 3.1, and the container did not complain. The bean method updates the database without any errors in the GlassFish log. Is this expected behavior?
Also, the specification mentions no such restriction on using java.sql.Connection.commit() for beans with container-managed transactions.
Thanks
Branislav
package ejb;

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.Resource;
import javax.ejb.*;
import javax.sql.DataSource;
import javax.transaction.*;

/**
 * @author bane
 */
@Stateless
@TransactionManagement(TransactionManagementType.BEAN)
public class MySession implements MySessionRemote {

    @Resource(name = "SAMPLE")
    private DataSource SAMPLE;

    @Resource
    private UserTransaction utx;
    // the code above is new

    @Override
    public String getResult() {
        return "This is my Session Bean";
    }

    public void doSomething() {
        try {
            Connection conn = SAMPLE.getConnection();
            Statement stmt = conn.createStatement();
            String q = "select * from BOOK";
            String up = "update BOOK set PRICE = PRICE + 1";
            utx.begin();
            int num = stmt.executeUpdate(up);
            System.out.println("num: " + num);
            ResultSet rs = stmt.executeQuery(q);
            // is conn.commit() legal?
            conn.commit();
            String name = null;
            int price = 0;
            while (rs.next()) {
                name = rs.getString(2);
                price = rs.getInt(3);
                System.err.println(name + " , " + price);
            }
            utx.commit();
        } catch (SQLException ex) {
            Logger.getLogger(MySession.class.getName()).log(Level.SEVERE, null, ex);
        } catch (Exception ex) {
            Logger.getLogger(MySession.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
}
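For comparison, here is what a spec-compliant version of doSomething() might look like, leaving all demarcation to UserTransaction. This is a minimal sketch against the same SAMPLE DataSource (Java 7+ try-with-resources, resource handling abbreviated):
public void doSomethingPortably() {
    try {
        utx.begin();
        // Obtain the connection inside the transaction so the container enlists it
        try (Connection conn = SAMPLE.getConnection();
                Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("update BOOK set PRICE = PRICE + 1");
            // No conn.commit() or conn.rollback() here; utx demarcates the transaction
        }
        utx.commit();
    } catch (Exception ex) {
        try {
            utx.rollback();
        } catch (Exception ignore) {
        }
        Logger.getLogger(MySession.class.getName()).log(Level.SEVERE, null, ex);
    }
}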

Related

Invalid Address JMSException when using temporary credentials with SQSSession

I am getting an error trying to connect to an SQS queue in another AWS account using JMS. I have tried to follow the approach taken in this answer, but I am receiving the following error:
com.amazonaws.services.sqs.model.AmazonSQSException: The address https://sqs.us-east-1.amazonaws.com/ is not valid for this endpoint. (Service: AmazonSQS; Status Code: 404; Error Code: InvalidAddress; Request ID: d7f72bd3-6240-5f63-b313-70c2d8978c14; Proxy: null)
Unlike in the post mentioned above (which I believe has the account credentials in the default provider chain?), I am trying to assume a role that has access to this SQS queue. Is this not possible through JMS, or am I doing something incorrectly?
import com.amazon.sqs.javamessaging.ProviderConfiguration;
import com.amazon.sqs.javamessaging.SQSConnectionFactory;
import com.amazon.sqs.javamessaging.SQSSession;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.securitytoken.AWSSecurityTokenService;
import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClient;
import com.amazonaws.services.sqs.AmazonSQSClientBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.jms.config.DefaultJmsListenerContainerFactory;
import org.springframework.jms.core.JmsTemplate;
import org.springframework.jms.support.destination.DynamicDestinationResolver;

import javax.jms.ConnectionFactory;
import javax.jms.JMSException;
import javax.jms.Queue;
import javax.jms.Session;

/**
 * A configuration class for JMS to poll an SQS queue
 * in another AWS account
 */
@Configuration
public class TranslationJmsConfig {

    private static final Logger LOGGER = LoggerFactory.getLogger(TranslationJmsConfig.class);

    @Value("${iam.connection.arn}")
    private String connectionRoleArn;

    @Value("${account.id}")
    private String brokerAccountId;

    /**
     * JmsListenerContainerFactory bean for the translation processing response queue.
     *
     * @param concurrentConsumers    number of concurrent consumers
     * @param maxConcurrentConsumers max number of concurrent consumers
     * @return an instance of JmsListenerContainerFactory
     */
    @Bean("translationJmsListenerContainerFactory")
    public DefaultJmsListenerContainerFactory translationJmsListenerContainerFactory(
            @Value("#{new Integer('${listener.concurrency}')}") int concurrentConsumers,
            @Value("#{new Integer('${listener.max.concurrency}')}") int maxConcurrentConsumers) {
        DefaultJmsListenerContainerFactory factory = new DefaultJmsListenerContainerFactory();
        factory.setConnectionFactory(getConnectionFactory(connectionRoleArn));
        factory.setDestinationResolver(new SqsDynamicDestinationResolver(brokerAccountId));
        factory.setSessionTransacted(false); // SQS does not support transactions
        // Automatic message acknowledgment after successful listener execution;
        // best-effort redelivery in case of a user exception as well as other
        // listener execution interruptions (such as the JVM dying).
        factory.setSessionAcknowledgeMode(Session.CLIENT_ACKNOWLEDGE);
        factory.setConcurrency(String.format("%d-%d", concurrentConsumers, maxConcurrentConsumers));
        return factory;
    }

    /**
     * Creates a custom JmsTemplate.
     *
     * @return JmsTemplate
     */
    @Bean
    public JmsTemplate customJmsTemplate() {
        JmsTemplate jmsTemplate = new JmsTemplate(getConnectionFactory(connectionRoleArn));
        jmsTemplate.setDestinationResolver(new SqsDynamicDestinationResolver(brokerAccountId));
        return jmsTemplate;
    }

    /**
     * A dynamic destination resolver for an SQS queue.
     */
    public class SqsDynamicDestinationResolver extends DynamicDestinationResolver {

        private final String brokerAccountId;

        /**
         * Constructor.
         *
         * @param brokerAccountId broker account id
         */
        public SqsDynamicDestinationResolver(String brokerAccountId) {
            this.brokerAccountId = brokerAccountId;
        }

        @Override
        protected Queue resolveQueue(Session session, String queueName) throws JMSException {
            if (session instanceof SQSSession) {
                SQSSession sqsSession = (SQSSession) session;
                return sqsSession.createQueue(queueName, brokerAccountId); // 404 invalid address -- something wrong with creds?
            }
            return super.resolveQueue(session, queueName);
        }
    }

    private ConnectionFactory getConnectionFactory(String connectionRoleArn) {
        AWSSecurityTokenService stsClient = AWSSecurityTokenServiceClient.builder().build();
        // Assume the connector account credentials so we can assume the customer account via chaining
        AWSCredentialsProvider dummyCredentialProviders =
                IdentityHelpers.assumeInternalRole(stsClient, connectionRoleArn); // a helper that assumes temporary creds
        return new SQSConnectionFactory(
                new ProviderConfiguration(),
                AmazonSQSClientBuilder.standard()
                        .withRegion(Regions.US_EAST_1)
                        .withCredentials(dummyCredentialProviders));
    }
}
I realized that when using the temporary credentials, I didn't need the second parameter (the account id) of the sqsSession.createQueue call. So once I changed
sqsSession.createQueue(queueName, brokerAccountId);
to:
return sqsSession.createQueue(queueName);
it worked fine. I guess I misunderstood the need for the account id. I assume the parameter is used when you have multiple accounts in your provider chain and you want it to search a specific account? Any light on this would still be appreciated!
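For reference, the working resolver override from the configuration above then reduces to:
@Override
protected Queue resolveQueue(Session session, String queueName) throws JMSException {
    if (session instanceof SQSSession) {
        // With assumed-role credentials, resolve by name only; the target
        // account is implied by the credentials themselves
        return ((SQSSession) session).createQueue(queueName);
    }
    return super.resolveQueue(session, queueName);
}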

How to understand if a batch ended in a BatchToRecordAdapter

I am developing a Spring Boot application that reads messages from a topic. Messages are managed in a transaction, read as strings in batch mode, and then deserialized to an object. This deserialization may fail, but I don't want to discard the whole batch; rather, I would move the failed messages to a DLQ.
Since I am using spring-kafka 2.6.5, I found out that I can use BatchToRecordAdapter to achieve this. However, I have not found out how to know when I am reading the last message of a batch.
I would like to read one message at a time, deserialize it, and store it in an ArrayList; when the listener reads the last message I want to do some processing and finally commit the transaction.
Thanks,
Giuseppe.
UPDATE
To achieve this I overrode BatchToRecordAdapter and added headers that tell me the position of each element within its batch.
package com.doxee.commons.lifecycle.kafka;

import java.util.List;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.listener.ConsumerRecordRecoverer;
import org.springframework.kafka.listener.adapter.BatchToRecordAdapter;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.messaging.Message;
import org.springframework.messaging.support.MessageBuilder;
import org.springframework.util.Assert;

/*
 * A BatchToRecordAdapter that enriches each message with headers carrying
 * the batch size and the message's position within the batch.
 *
 * Bugs: none known
 *
 * @author gmiano gmiano@doxee.com
 * @createDate 25/01/21
 *
 * Copyright (C) 2021 Doxee S.p.A. C.F. - P.IVA: IT02714390362. All Rights Reserved
 */
@Slf4j
public class BatchToEnrichedRecordAdapter<K, V> implements BatchToRecordAdapter<K, V> {

    private final ConsumerRecordRecoverer recoverer;

    public BatchToEnrichedRecordAdapter(ConsumerRecordRecoverer recoverer) {
        Assert.notNull(recoverer, "'recoverer' cannot be null");
        this.recoverer = recoverer;
    }

    @Override
    public void adapt(List<Message<?>> messages, List<ConsumerRecord<K, V>> records,
            Acknowledgment ack, Consumer<?, ?> consumer, Callback<K, V> callback) {
        for (int i = 0; i < messages.size(); ++i) {
            Message<?> enrichedMessage = MessageBuilder.fromMessage(messages.get(i))
                    .setHeader(MyHeaders.BATCH_SIZE, messages.size())
                    .setHeader(MyHeaders.MESSAGE_BATCH_POSITION, i + 1)
                    .build();
            try {
                callback.invoke(records.get(i), ack, consumer, enrichedMessage);
            } catch (Exception e) {
                this.recoverer.accept(records.get(i), e);
            }
        }
    }
}
with this bean as the recoverer:
@Bean
ConsumerRecordRecoverer recoverer(KafkaOperations<?, ?> template) {
    return new DeadLetterPublishingRecoverer(template, (record, ex) -> {
        String srcTopic = record.topic();
        String srcKey = record.key().toString();
        log.error("Failed consume of message {} from topic {}", srcKey, srcTopic, ex);
        String dstTopic;
        if (ex.getCause() instanceof ClientResumableException) {
            dstTopic = srcTopic.concat(".RECOVERABLE");
        } else {
            dstTopic = srcTopic.concat(".DLT");
        }
        log.error("Cannot retry. Try to write message to topic: {}", dstTopic);
        return new TopicPartition(dstTopic, 0);
    });
}
Is this the proper solution?
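For what it's worth, a record listener could use those headers to detect the last record of each batch. A minimal sketch, assuming the MyHeaders constants used by the adapter above and a container factory wired with it (deserialize and process are hypothetical placeholders):
import java.util.ArrayList;
import java.util.List;

import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.stereotype.Component;

@Component
public class BatchAwareListener {

    private final List<Object> buffer = new ArrayList<>();

    @KafkaListener(topics = "my-topic", containerFactory = "batchFactory")
    public void onMessage(String payload,
            @Header(MyHeaders.BATCH_SIZE) int batchSize,
            @Header(MyHeaders.MESSAGE_BATCH_POSITION) int position) {
        buffer.add(deserialize(payload));  // accumulate records of the current batch
        if (position == batchSize) {       // last record of this batch
            process(buffer);               // batch-level processing, then commit
            buffer.clear();
        }
    }

    private Object deserialize(String payload) { return payload; } // placeholder
    private void process(List<Object> batch) { /* placeholder */ }
}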

Is putting sqs-consumer to detect receiveMessage events in SQS scalable?

I am using AWS SQS as a message queue. After sqs.sendMessage sends the data, I want to detect sqs.receiveMessage via either an infinite loop or event triggering, in a scalable way. Then I came across sqs-consumer, which handles sqs.receiveMessage events the moment it receives messages. But I was wondering: is it the most suitable way to handle message passing between microservices, or is there a better way to handle this?
I had written code in Java for fetching data from an SQS queue with SQSBufferedAsyncClient; the advantage of this API is that it buffers messages in async mode.
package com.sxm.aota.tsc.config;

import java.net.UnknownHostException;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonWebServiceRequest;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;
import com.amazonaws.retry.RetryPolicy;
import com.amazonaws.retry.RetryPolicy.BackoffStrategy;
import com.amazonaws.services.sqs.AmazonSQSAsync;
import com.amazonaws.services.sqs.AmazonSQSAsyncClient;
import com.amazonaws.services.sqs.buffered.AmazonSQSBufferedAsyncClient;
import com.amazonaws.services.sqs.buffered.QueueBufferConfig;

@Configuration
public class SQSConfiguration {

    /** The properties cache config. */
    @Autowired
    private PropertiesCacheConfig propertiesCacheConfig;

    @Bean
    public AmazonSQSAsync amazonSQSClient() {
        // Create client configuration
        ClientConfiguration clientConfig = new ClientConfiguration()
                .withMaxErrorRetry(5)
                .withConnectionTTL(10_000L)
                .withTcpKeepAlive(true)
                .withRetryPolicy(new RetryPolicy(
                        null,
                        new BackoffStrategy() {
                            @Override
                            public long delayBeforeNextRetry(AmazonWebServiceRequest req,
                                    AmazonClientException exception, int retries) {
                                // Delay between retries is 10s unless it is an UnknownHostException,
                                // for which the retry delay is 60s
                                return exception.getCause() instanceof UnknownHostException ? 60_000L : 10_000L;
                            }
                        }, 10, true));
        // Create the Amazon client
        AmazonSQSAsync asyncSqsClient = null;
        if (propertiesCacheConfig.isIamRole()) {
            asyncSqsClient = new AmazonSQSAsyncClient(new InstanceProfileCredentialsProvider(true), clientConfig);
        } else {
            // BasicAWSCredentials takes the access key first, then the secret key
            asyncSqsClient = new AmazonSQSAsyncClient(
                    new BasicAWSCredentials("accesskey", "secretkey"));
        }
        final Regions regions = Regions.fromName(propertiesCacheConfig.getRegionName());
        asyncSqsClient.setRegion(Region.getRegion(regions));
        asyncSqsClient.setEndpoint(propertiesCacheConfig.getEndPoint());
        // Buffer for request batching
        final QueueBufferConfig bufferConfig = new QueueBufferConfig();
        // Ensure visibility timeout is maintained
        bufferConfig.setVisibilityTimeoutSeconds(20);
        // Enable long polling
        bufferConfig.setLongPoll(true);
        // Set batch parameters
        // bufferConfig.setMaxBatchOpenMs(500);
        // Set to receive messages only on demand
        // bufferConfig.setMaxDoneReceiveBatches(0);
        // bufferConfig.setMaxInflightReceiveBatches(0);
        return new AmazonSQSBufferedAsyncClient(asyncSqsClient, bufferConfig);
    }
}
Then I wrote the scheduler, which executes every 2 seconds, fetches data from the queue, processes it, and deletes it from the queue before the visibility timeout; otherwise the message will be ready for processing again when the visibility timeout expires.
package com.sxm.aota.tsc.sqs;

import java.util.List;

import javax.annotation.PostConstruct;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.DependsOn;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

import com.amazonaws.services.sqs.AmazonSQSAsync;
import com.amazonaws.services.sqs.model.DeleteMessageRequest;
import com.amazonaws.services.sqs.model.GetQueueUrlRequest;
import com.amazonaws.services.sqs.model.GetQueueUrlResult;
import com.amazonaws.services.sqs.model.ReceiveMessageRequest;
import com.amazonaws.services.sqs.model.ReceiveMessageResult;
import com.fasterxml.jackson.databind.ObjectMapper;

/**
 * The Class TSCDataSenderScheduledTask.
 *
 * Sends the aggregated vehicle data to TSC in batches.
 */
@EnableScheduling
@Component("sqsScheduledTask")
@DependsOn({ "propertiesCacheConfig", "amazonSQSClient" })
public class SQSScheduledTask {

    private static final Logger LOGGER = LoggerFactory.getLogger(SQSScheduledTask.class);

    @Autowired
    private PropertiesCacheConfig propertiesCacheConfig;
    @Autowired
    public AmazonSQSAsync amazonSQSClient;

    /**
     * Timer task that runs after a specific interval of time, mainly
     * responsible for sending the data in batches to TSC.
     */
    private String queueUrl;
    private final ObjectMapper mapper = new ObjectMapper();

    @PostConstruct
    public void initialize() throws Exception {
        LOGGER.info("SQS-Publisher: publisher initializing for queue {}",
                propertiesCacheConfig.getSQSQueueName());
        // Get the queue URL
        final GetQueueUrlRequest request = new GetQueueUrlRequest()
                .withQueueName(propertiesCacheConfig.getSQSQueueName());
        final GetQueueUrlResult response = amazonSQSClient.getQueueUrl(request);
        queueUrl = response.getQueueUrl();
        LOGGER.info("SQS-Publisher: publisher initialized for queue {}, URL = {}",
                propertiesCacheConfig.getSQSQueueName(), queueUrl);
    }

    @Scheduled(fixedDelayString = "${sqs.consumer.delay}")
    public void timerTask() {
        final ReceiveMessageResult receiveResult = getMessagesFromSQS();
        String messageBody = null;
        if (receiveResult != null && receiveResult.getMessages() != null && !receiveResult.getMessages().isEmpty()) {
            try {
                messageBody = receiveResult.getMessages().get(0).getBody();
                String messageReceiptHandle = receiveResult.getMessages().get(0).getReceiptHandle();
                Vehicles vehicles = mapper.readValue(messageBody, Vehicles.class);
                processMessage(vehicles.getVehicles(), messageReceiptHandle);
            } catch (Exception e) {
                LOGGER.error("Exception while processing SQS message : {}", messageBody);
                // The message is not deleted on SQS and will be processed again after the visibility timeout
            }
        }
    }

    public void processMessage(List<Vehicle> vehicles, String messageReceiptHandle) throws InterruptedException {
        // processing code
        // Delete the SQS message once processing is completed.
        // Need an atomic counter that is incremented by all threads; once it reaches 0, delete the message.
        amazonSQSClient.deleteMessage(new DeleteMessageRequest(queueUrl, messageReceiptHandle));
    }

    private ReceiveMessageResult getMessagesFromSQS() {
        try {
            // Create a new request and fetch data from the Amazon SQS queue
            return amazonSQSClient
                    .receiveMessage(new ReceiveMessageRequest().withMaxNumberOfMessages(1).withQueueUrl(queueUrl));
        } catch (Exception e) {
            LOGGER.error("Error while fetching data from SQS", e);
        }
        return null;
    }
}
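As an alternative to a fixed-delay scheduler, SQS long polling lets receiveMessage block for up to WaitTimeSeconds and return as soon as a message arrives, which avoids most empty receives. A minimal sketch against the same SDK (the queue URL is hypothetical):
import com.amazonaws.services.sqs.AmazonSQS;
import com.amazonaws.services.sqs.AmazonSQSClientBuilder;
import com.amazonaws.services.sqs.model.Message;
import com.amazonaws.services.sqs.model.ReceiveMessageRequest;

public class LongPollingConsumer {

    public static void main(String[] args) {
        AmazonSQS sqs = AmazonSQSClientBuilder.defaultClient();
        String queueUrl = "https://sqs.us-east-1.amazonaws.com/123456789012/my-queue"; // hypothetical
        ReceiveMessageRequest request = new ReceiveMessageRequest(queueUrl)
                .withWaitTimeSeconds(20)       // long poll: block up to 20 seconds
                .withMaxNumberOfMessages(10);  // fetch up to 10 messages per call
        while (true) {
            for (Message m : sqs.receiveMessage(request).getMessages()) {
                System.out.println(m.getBody());
                // Delete only after successful processing
                sqs.deleteMessage(queueUrl, m.getReceiptHandle());
            }
        }
    }
}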

AEM 6.3: Creating Scheduler using OSGi R6 annotations

I have written a scheduler using OSGi R6 annotations, but it doesn't seem to run:
package com.aem.sites.interfaces;

import org.osgi.service.metatype.annotations.AttributeDefinition;
import org.osgi.service.metatype.annotations.AttributeType;
import org.osgi.service.metatype.annotations.ObjectClassDefinition;

@ObjectClassDefinition(name = "Scheduler Configuration for Weather", description = "Configuration file for Scheduler")
public @interface SchedulerConfiguration {

    @AttributeDefinition(
            name = "sample parameter",
            description = "Sample String parameter",
            type = AttributeType.STRING
    )
    public String parameter() default "scheduler";

    @AttributeDefinition(
            name = "Concurrent",
            description = "Schedule task concurrently",
            type = AttributeType.BOOLEAN
    )
    boolean scheduler_concurrent() default true;

    @AttributeDefinition(
            name = "Expression",
            description = "Cron-job expression. Default: run every minute.",
            type = AttributeType.STRING
    )
    String scheduler_expression() default "0 * * * * ?";
}
and
package com.aem.sites.schedulers;

import org.osgi.service.component.annotations.Activate;
import org.osgi.service.component.annotations.Component;
import org.osgi.service.metatype.annotations.Designate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.aem.sites.interfaces.SchedulerConfiguration;

@Component(immediate = true,
        configurationPid = "com.aem.sites.schedulers.WeatherServiceScheduler")
@Designate(ocd = SchedulerConfiguration.class)
public class WeatherServiceScheduler implements Runnable {

    private final Logger logger = LoggerFactory.getLogger(this.getClass());

    private String myParameter;

    @Override
    public void run() {
        logger.info("*******************************************Sample OSGi Scheduler is now running: {}", myParameter);
    }

    @Activate
    public void activate(SchedulerConfiguration config) {
        // Read the parameter before logging it
        myParameter = config.parameter();
        logger.info("*******************************************weather service scheduler: {}", myParameter);
    }
}
I am following this https://github.com/nateyolles/aem-osgi-annotation-demo/blob/master/core/src/main/java/com/nateyolles/aem/osgiannotationdemo/core/schedulers/SampleOsgiScheduledTask.java but it looks like I am doing something wrong here. Not sure what, though.
Thanks in advance
In your WeatherServiceScheduler class, you are not registering it as a service. Instead of configurationPid, you can use service = Runnable.class.
The correct way to create a Sling scheduler using OSGi R6 annotations is as follows:
Create your OSGi configuration class:
import org.osgi.service.metatype.annotations.AttributeDefinition;
import org.osgi.service.metatype.annotations.AttributeType;
import org.osgi.service.metatype.annotations.ObjectClassDefinition;

@ObjectClassDefinition(name = "Sling Scheduler Configuration", description = "This configuration is used to demonstrate a sling scheduler in action")
public @interface SchedulerConfiguration {

    @AttributeDefinition(
            name = "Scheduler name",
            description = "Name of the scheduler",
            type = AttributeType.STRING)
    public String name() default "Custom Sling Scheduler";

    @AttributeDefinition(
            name = "Enabled",
            description = "Flag to enable/disable a scheduler",
            type = AttributeType.BOOLEAN) // boolean attribute, not STRING
    public boolean enabled() default false;

    @AttributeDefinition(
            name = "Cron expression",
            description = "Cron expression used by the scheduler",
            type = AttributeType.STRING)
    public String cronExpression() default "0 * * * * ?";

    @AttributeDefinition(
            name = "Custom parameter",
            description = "Custom parameter to showcase the usage of a sling scheduler",
            type = AttributeType.STRING)
    public String customParameter();
}
Create your scheduler class as a service. For creating an OSGi service using R6 annotations we use @Component(service = <your-interface>.class, ...).
Thus, create a service as follows:
import org.apache.sling.commons.scheduler.ScheduleOptions;
import org.apache.sling.commons.scheduler.Scheduler;
import org.osgi.service.component.annotations.Activate;
import org.osgi.service.component.annotations.Component;
import org.osgi.service.component.annotations.Deactivate;
import org.osgi.service.component.annotations.Modified;
import org.osgi.service.component.annotations.Reference;
import org.osgi.service.metatype.annotations.Designate;
import org.redquark.aem.learning.core.configurations.SchedulerConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@Component(immediate = true, service = Runnable.class)
@Designate(ocd = SchedulerConfiguration.class)
public class CustomScheduler implements Runnable {

    // Logger
    private final Logger log = LoggerFactory.getLogger(this.getClass());

    // Custom parameter that is to be read from the configuration
    private String customParameter;

    // Id of the scheduler based on its name
    private int schedulerId;

    // Scheduler instance injected
    @Reference
    private Scheduler scheduler;

    /**
     * Activate method to initialize stuff
     *
     * @param schedulerConfiguration
     */
    @Activate
    protected void activate(SchedulerConfiguration schedulerConfiguration) {
        schedulerId = schedulerConfiguration.name().hashCode();
        customParameter = schedulerConfiguration.customParameter();
    }

    /**
     * Modifies the scheduler id on modification
     *
     * @param schedulerConfiguration
     */
    @Modified
    protected void modified(SchedulerConfiguration schedulerConfiguration) {
        // Removing the scheduler
        removeScheduler();
        // Updating the scheduler id
        schedulerId = schedulerConfiguration.name().hashCode();
        // Adding the scheduler again
        addScheduler(schedulerConfiguration);
    }

    /**
     * This method deactivates the scheduler and removes it
     *
     * @param schedulerConfiguration
     */
    @Deactivate
    protected void deactivate(SchedulerConfiguration schedulerConfiguration) {
        // Removing the scheduler
        removeScheduler();
    }

    /**
     * This method removes the scheduler
     */
    private void removeScheduler() {
        log.info("Removing scheduler: {}", schedulerId);
        // Unscheduling/removing the scheduler
        scheduler.unschedule(String.valueOf(schedulerId));
    }

    /**
     * This method adds the scheduler
     *
     * @param schedulerConfiguration
     */
    private void addScheduler(SchedulerConfiguration schedulerConfiguration) {
        // Check if the scheduler is enabled
        if (schedulerConfiguration.enabled()) {
            // ScheduleOptions takes the cron expression as a parameter and runs accordingly
            ScheduleOptions scheduleOptions = scheduler.EXPR(schedulerConfiguration.cronExpression());
            // Adding some parameters
            scheduleOptions.name(schedulerConfiguration.name());
            scheduleOptions.canRunConcurrently(false);
            // Scheduling the job
            scheduler.schedule(this, scheduleOptions);
            log.info("Scheduler added");
        } else {
            log.info("Scheduler is disabled");
        }
    }

    /**
     * Overridden run method to execute the job
     */
    @Override
    public void run() {
        log.info("Custom Scheduler is now running using the passed custom parameter, customParameter {}",
                customParameter);
    }
}
In the activate() method, we are reading the required values. Then we are getting the schedulerId from the scheduler name.
The modified() method recalculates the schedulerId in case the OSGi configuration is modified.
In the addScheduler() method, we are registering the scheduler using the Scheduler API.
For more information and step-by-step execution, you can see my blog post as well - Day 13: Schedulers in AEM
I hope this helps. Happy coding!
There is no need for configurationPid in the class annotation, and you are also missing service = Runnable.class, which should follow immediate = true; i.e. the class declaration should look like:
@Component(immediate = true, service = Runnable.class)
@Designate(ocd = SchedulerConfiguration.class)
public class WeatherServiceScheduler implements Runnable {

Does any embedded DB support JSON datatype?

I'm developing a Spring-based web application with PostgreSQL as the database, and I'm using PostgreSQL's JSON datatype. I have configured the entity with a Hibernate custom user type to support the JSON datatype.
Now I want to test my DAO objects using an embedded DB. Is there any embedded DB that supports the JSON data type and can be used in a Spring application?
When you use database-specific features - like JSON support in PostgreSQL - for safety you have to use the same type of database for testing. In your case, to test your DAO objects you can:
assume that PostgreSQL is installed on localhost and make sure that is the case for all environments where tests run
or, even better, try otj-pg-embedded, which downloads and starts PostgreSQL for JUnit tests (I haven't used it in real-life projects)
Update
If you are able to run Docker in your test environment, instead of embedded databases use a real Postgres via Testcontainers.
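For illustration, a test against a throwaway Postgres container might look like this. A minimal sketch, assuming the org.testcontainers:postgresql dependency and JUnit 5 on the classpath:
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

import org.junit.jupiter.api.Test;
import org.testcontainers.containers.PostgreSQLContainer;

public class JsonDaoTest {

    @Test
    void queriesJsonColumn() throws Exception {
        try (PostgreSQLContainer<?> pg = new PostgreSQLContainer<>("postgres:15")) {
            pg.start(); // pulls the image on first run, then starts a container
            try (Connection c = DriverManager.getConnection(
                    pg.getJdbcUrl(), pg.getUsername(), pg.getPassword())) {
                c.createStatement().execute("create table book (doc json)");
                c.createStatement().execute(
                        "insert into book values ('{\"name\":\"Effective Java\",\"price\":45}')");
                ResultSet rs = c.createStatement()
                        .executeQuery("select doc->>'name' from book");
                rs.next();
                System.out.println(rs.getString(1)); // prints: Effective Java
            }
        }
    }
}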
Another answer demonstrates that SQLite (whose JDBC driver ships with the JSON1 extension) can serve as a lightweight embedded option:
package miniCodePrjPkg;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.List;
import java.util.Map;

import org.apache.commons.dbutils.QueryRunner;
import org.apache.commons.dbutils.handlers.MapListHandler;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;

// Embedded MySQL (com.wix.mysql) was considered as an alternative, but it only
// starts a server; you still need a client to connect, which is more trouble than SQLite:
//import com.wix.mysql.EmbeddedMysql;
//import static com.wix.mysql.EmbeddedMysql.anEmbeddedMysql;
//import static com.wix.mysql.ScriptResolver.classPathScript;
//import static com.wix.mysql.distribution.Version.v5_7_latest;

public class DslQueryCollList {

    public static void main(String[] args) throws Exception {
        // Build some sample rows as maps
        Map<String, Object> myMap = Maps.newHashMap(ImmutableMap.of("name", 999999999, "age", 22));
        Map<String, Object> myMap2 = Maps.newHashMap(ImmutableMap.of("name", 8888888, "age", 33));
        List<Map<String, Object>> li =
                new ImmutableList.Builder<Map<String, Object>>().add(myMap).add(myMap2).build();
        System.out.println(li);

        // Query the JSON column with SQLite's json_extract
        String sql = " json_extract(jsonfld,'$.age') > 30";
        List<Object> query = queryList(sql, li);
        System.out.println(query);
    }

    private static List<Object> queryList(String whereClause, List<Map<String, Object>> li)
            throws ClassNotFoundException, SQLException, JsonProcessingException {
        String sqlQuery = "SELECT * FROM sys_data where " + whereClause;
        Class.forName("org.sqlite.JDBC");
        Connection c = DriverManager.getConnection("jdbc:sqlite:test.db");
        Statement stmt = c.createStatement();
        exeUpdateSafe(stmt, "drop TABLE sys_data");
        exeUpdateSafe(stmt, "CREATE TABLE sys_data (jsonfld json)");
        for (Map<String, Object> object : li) {
            String jsonstr = new ObjectMapper().writeValueAsString(object);
            // Note: string concatenation is fine for this demo, but real code
            // should use a PreparedStatement to avoid SQL injection
            exeUpdateSafe(stmt, "insert into sys_data values('" + jsonstr + "');");
        }
        System.out.println(sqlQuery);
        QueryRunner run = new QueryRunner();
        List<Map<String, Object>> rows = run.query(c, sqlQuery, new MapListHandler());
        System.out.println(rows);
        List<Object> result = Lists.newArrayList();
        for (Map<String, Object> map : rows) {
            result.add(map.get("jsonfld"));
        }
        return result;
    }

    private static void exeUpdateSafe(Statement stmt, String sql2) throws SQLException {
        try {
            System.out.println(sql2);
            System.out.println(stmt.executeUpdate(sql2));
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
