Spring cache bound only to the current transaction - Spring

I'm trying to convince my company to work with Spring 3.2's cache abstraction (I know it's very old).
The application is built on top of Alfresco 5.x (which is built on top of Spring 3.2).
Currently, we have some caches bound to the current transaction:
if (AlfrescoTransactionSupport.getTransactionReadState() == TxnReadState.TXN_READ_ONLY) {
cache = (Map<String, Boolean>) AlfrescoTransactionSupport.getResource(CACHED_NAME);
if (cache == null) {
cache = new HashMap<String, Boolean>();
}
AlfrescoTransactionSupport.bindResource(CACHED_NAME, cache);
}
The cache lives only for the current read transaction and is then destroyed.
I've tried the
@Cacheable("cache_name")
@Transactional(readOnly=true)
annotations, but when a read-write transaction is open, the cache is not destroyed.
Any idea how to do that the Spring way?
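For reference, the annotations would typically sit on a service method like this (a minimal sketch; the service class and method are hypothetical):
import org.springframework.cache.annotation.Cacheable;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

@Service
public class PermissionLookupService {

    // With the standard cache managers the entries live in the CacheManager,
    // not in the transaction, so they survive the read-only transaction
    // that created them - the behaviour the question wants to change.
    @Cacheable("cache_name")
    @Transactional(readOnly = true)
    public Boolean isAuthorized(String nodeRef) {
        return expensiveAuthorizationCheck(nodeRef); // placeholder for the real lookup
    }

    private Boolean expensiveAuthorizationCheck(String nodeRef) {
        return Boolean.TRUE; // stub
    }
}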

@biiyamn was right,
I had to implement my own cache to do that.
First of all, I had to implement the FactoryBean:
import org.springframework.beans.factory.BeanNameAware;
import org.springframework.beans.factory.FactoryBean;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.util.StringUtils;
public class KReadTransactionCacheFactoryBean implements FactoryBean<KReadTransactionCache>, BeanNameAware,
InitializingBean {
private String name = "";
private boolean allowNullValues = true;
private KReadTransactionCache cache;
/**
* Specify the name of the cache.
* <p>Default is "" (empty String).
*/
public void setName(String name) {
this.name = name;
}
/**
* Set whether to allow {@code null} values
* (adapting them to an internal null holder value).
* <p>Default is "true".
*/
public void setAllowNullValues(boolean allowNullValues) {
this.allowNullValues = allowNullValues;
}
public void setBeanName(String beanName) {
if (!StringUtils.hasLength(this.name)) {
setName(beanName);
}
}
public void afterPropertiesSet() {
this.cache = new KReadTransactionCache(this.name, this.allowNullValues);
}
public KReadTransactionCache getObject() {
return this.cache;
}
public Class<?> getObjectType() {
return KReadTransactionCache.class;
}
public boolean isSingleton() {
return false;
}
}
Then, implement the cache bound to the current transaction:
import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport;
import org.alfresco.repo.transaction.AlfrescoTransactionSupport.TxnReadState;
import org.springframework.cache.Cache;
import org.springframework.cache.support.SimpleValueWrapper;
public class KReadTransactionCache implements Cache {
private static final Object NULL_HOLDER = new NullHolder();
private final String name;
private final boolean allowNullValues;
/**
* Create a new KReadTransactionCache with the specified name.
* @param name the name of the cache
*/
public KReadTransactionCache(String name) {
this(name, true);
}
protected static Map<Object, Object> getBindedCache(String name) {
Map<Object, Object> cache = null;
if (AlfrescoTransactionSupport.getTransactionReadState() == TxnReadState.TXN_READ_ONLY) {
cache = AlfrescoTransactionSupport.getResource(name);
if (cache == null) {
cache = new HashMap<>();
}
AlfrescoTransactionSupport.bindResource(name, cache);
}
return cache;
}
/**
* Create a new Map with the specified name and the
* given internal ConcurrentMap to use.
* @param name the name of the cache
* @param allowNullValues whether to allow {@code null} values
* (adapting them to an internal null holder value)
*/
public KReadTransactionCache(String name, boolean allowNullValues) {
this.name = name;
this.allowNullValues = allowNullValues;
}
public String getName() {
return this.name;
}
public Map getNativeCache() {
return getBindedCache(name);
}
public boolean isAllowNullValues() {
return this.allowNullValues;
}
public ValueWrapper get(Object key) {
final Map<Object, Object> bindedCache = getBindedCache(name);
if (bindedCache == null) {
return null;
}
Object value = bindedCache.get(key);
return (value != null ? new SimpleValueWrapper(fromStoreValue(value)) : null);
}
public void put(Object key, Object value) {
final Map<Object, Object> bindedCache = getBindedCache(name);
if (bindedCache == null) {
return;
}
bindedCache.put(key, toStoreValue(value));
}
public void evict(Object key) {
final Map<Object, Object> bindedCache = getBindedCache(name);
if (bindedCache == null) {
return;
}
bindedCache.remove(key);
}
public void clear() {
final Map<Object, Object> bindedCache = getBindedCache(name);
if (bindedCache == null) {
return;
}
bindedCache.clear();
}
/**
* Convert the given value from the internal store to a user value
* returned from the get method (adapting {@code null}).
* @param storeValue the store value
* @return the value to return to the user
*/
protected Object fromStoreValue(Object storeValue) {
if (this.allowNullValues && storeValue == NULL_HOLDER) {
return null;
}
return storeValue;
}
/**
* Convert the given user value, as passed into the put method,
* to a value in the internal store (adapting {@code null}).
* @param userValue the given user value
* @return the value to store
*/
protected Object toStoreValue(Object userValue) {
if (this.allowNullValues && userValue == null) {
return NULL_HOLDER;
}
return userValue;
}
@SuppressWarnings("serial")
private static class NullHolder implements Serializable {
}
}
And the XML configuration:
<!-- *******************************
***** CACHE CONFIGURATION *****
******************************* -->
<!-- simple cache manager -->
<bean id="cacheManager" class="org.springframework.cache.support.SimpleCacheManager">
<property name="caches">
<set>
<bean class="org.springframework.cache.concurrent.ConcurrentMapCacheFactoryBean" p:name="default" />
<bean class="path.to.package.KReadTransactionCacheFactoryBean" p:name="cacheNameByAnnotation" />
<!-- TODO Add other cache instances in here -->
</set>
</property>
</bean>

SimpleCacheManager is useful for test environments, as noted in the Spring docs.
SimpleCacheManager only supports a static mode in which the caches are predefined at configuration time; you are not allowed to add caches at runtime.
EhCache and its associated Spring bridge, EhCacheCacheManager, could be a good choice.
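For example, a minimal EhCache setup in Spring 3.x XML could look like this (bean ids and the ehcache.xml location are illustrative):
<!-- Spring's CacheManager SPI backed by EhCache -->
<bean id="cacheManager" class="org.springframework.cache.ehcache.EhCacheCacheManager"
      p:cacheManager-ref="ehcache" />
<!-- The native EhCache manager, configured from ehcache.xml -->
<bean id="ehcache" class="org.springframework.cache.ehcache.EhCacheManagerFactoryBean"
      p:configLocation="classpath:ehcache.xml" p:shared="true" />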

Related

How to export metrics to Prometheus via PushGateway using Spring Boot 2.0

I upgraded my Spring Boot version from 1.5.x to 2.0.1, but struggled with an issue around the new metrics.
To use Micrometer in Spring Boot 2.0+, I had to remove the <dependency/> on micrometer-spring-legacy. Unfortunately, all of the management.metrics.export.prometheus.pushgateway configuration disappeared with it. So how can I export metrics to the Pushgateway using Spring Boot 2.0?
Many thanks!
Unfortunately, the Prometheus Pushgateway auto-configuration hasn't made it into Spring Boot 2. I'm unsure whether a PR which incorporates the micrometer-spring-legacy setup would be accepted.
In the meantime you could try to set up your own @Configuration class which includes everything starting here.
Here's a quickly stitched together solution:
@Configuration
@EnableConfigurationProperties(PushgatewayProperties.class)
public class PushgatewayConfiguration {
@ConfigurationProperties(prefix = "management.metrics.export.prometheus.pushgateway")
public static class PushgatewayProperties {
/**
* Enable publishing via a Prometheus Pushgateway.
*/
private Boolean enabled = false;
/**
* Required host:port or ip:port of the Pushgateway.
*/
private String baseUrl = "localhost:9091";
/**
* Required identifier for this application instance.
*/
private String job;
/**
* Frequency with which to push metrics to Pushgateway.
*/
private Duration pushRate = Duration.ofMinutes(1);
/**
* Push metrics right before shut-down. Mostly useful for batch jobs.
*/
private boolean pushOnShutdown = true;
/**
* Delete metrics from Pushgateway when application is shut-down
*/
private boolean deleteOnShutdown = true;
/**
* Used to group metrics in pushgateway. A common example is setting
*/
private Map<String, String> groupingKeys = new HashMap<>();
public Boolean getEnabled() {
return enabled;
}
public void setEnabled(Boolean enabled) {
this.enabled = enabled;
}
public String getBaseUrl() {
return baseUrl;
}
public void setBaseUrl(String baseUrl) {
this.baseUrl = baseUrl;
}
public String getJob() {
return job;
}
public void setJob(String job) {
this.job = job;
}
public Duration getPushRate() {
return pushRate;
}
public void setPushRate(Duration pushRate) {
this.pushRate = pushRate;
}
public boolean isPushOnShutdown() {
return pushOnShutdown;
}
public void setPushOnShutdown(boolean pushOnShutdown) {
this.pushOnShutdown = pushOnShutdown;
}
public boolean isDeleteOnShutdown() {
return deleteOnShutdown;
}
public void setDeleteOnShutdown(boolean deleteOnShutdown) {
this.deleteOnShutdown = deleteOnShutdown;
}
public Map<String, String> getGroupingKeys() {
return groupingKeys;
}
public void setGroupingKeys(Map<String, String> groupingKeys) {
this.groupingKeys = groupingKeys;
}
}
static class PrometheusPushGatewayEnabledCondition extends AllNestedConditions {
public PrometheusPushGatewayEnabledCondition() {
super(ConfigurationPhase.PARSE_CONFIGURATION);
}
@ConditionalOnProperty(value = "management.metrics.export.prometheus.enabled", matchIfMissing = true)
static class PrometheusMeterRegistryEnabled {
//
}
#ConditionalOnProperty("management.metrics.export.prometheus.pushgateway.enabled")
static class PushGatewayEnabled {
//
}
}
/**
* Configuration for
* <a href="https://github.com/prometheus/pushgateway">Prometheus
* Pushgateway</a>.
*
* @author David J. M. Karlsen
*/
@Configuration
@ConditionalOnClass(PushGateway.class)
@Conditional(PrometheusPushGatewayEnabledCondition.class)
@Incubating(since = "1.0.0")
public class PrometheusPushGatewayConfiguration {
private final Logger logger = LoggerFactory.getLogger(PrometheusPushGatewayConfiguration.class);
private final CollectorRegistry collectorRegistry;
private final PushgatewayProperties pushgatewayProperties;
private final PushGateway pushGateway;
private final Environment environment;
private final ScheduledExecutorService executorService;
PrometheusPushGatewayConfiguration(CollectorRegistry collectorRegistry,
PushgatewayProperties pushgatewayProperties, Environment environment) {
this.collectorRegistry = collectorRegistry;
this.pushgatewayProperties = pushgatewayProperties;
this.pushGateway = new PushGateway(pushgatewayProperties.getBaseUrl());
this.environment = environment;
this.executorService = Executors.newSingleThreadScheduledExecutor((r) -> {
final Thread thread = new Thread(r);
thread.setDaemon(true);
thread.setName("micrometer-pushgateway");
return thread;
});
executorService.scheduleAtFixedRate(this::push, 0, pushgatewayProperties.getPushRate().toMillis(),
TimeUnit.MILLISECONDS);
}
void push() {
try {
pushGateway.pushAdd(collectorRegistry, job(), pushgatewayProperties.getGroupingKeys());
} catch (UnknownHostException e) {
logger.error("Unable to locate host '" + pushgatewayProperties.getBaseUrl()
+ "'. No longer attempting metrics publication to this host");
executorService.shutdown();
} catch (Throwable t) {
logger.error("Unable to push metrics to Prometheus Pushgateway", t);
}
}
@PreDestroy
void shutdown() {
executorService.shutdown();
if (pushgatewayProperties.isPushOnShutdown()) {
push();
}
if (pushgatewayProperties.isDeleteOnShutdown()) {
try {
pushGateway.delete(job(), pushgatewayProperties.getGroupingKeys());
} catch (Throwable t) {
logger.error("Unable to delete metrics from Prometheus Pushgateway", t);
}
}
}
private String job() {
String job = pushgatewayProperties.getJob();
if (job == null) {
job = environment.getProperty("spring.application.name");
}
if (job == null) {
// There's a history of the Prometheus Spring integration defaulting the job name
// to "spring", from before Prometheus integration existed in Spring itself.
job = "spring";
}
return job;
}
}
}
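With this configuration class in place, the familiar properties work again; for example, in application.properties (values are illustrative):
management.metrics.export.prometheus.pushgateway.enabled=true
management.metrics.export.prometheus.pushgateway.base-url=pushgateway.example.com:9091
management.metrics.export.prometheus.pushgateway.job=my-batch-job
management.metrics.export.prometheus.pushgateway.push-rate=1m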

camelContext attribute discriminator for tenant resolver, using JPA multitenancy and Camel routeId

I'd like to ask how I can use the camelContext to get the name of the route fired by an event; more in detail, how can I use some kind of discriminator attribute x in the camelContext for a predicate decision (if x = 1 then .. else ..)?
For example:
I have this kind of route:
// this route uses the first database
from("direct:csvprocessor1")
.routeId("tenant1")
.from("file:src/main/resources/data/1?move=OUT&moveFailed=REFUSED")
.unmarshal(csv)
.to("bean:myCsvHandler?method=doHandleCsvData")
.setBody(constant("OK VB"))
.setHeader(Exchange.HTTP_RESPONSE_CODE, constant(200))
.setHeader(Exchange.CONTENT_TYPE, constant("text/html"));
and this other route:
// this route uses tenant2, the second database
from("direct:csvprocessor1")
.routeId("tenant2")
.from("file:src/main/resources/data/2?move=OUT&moveFailed=REFUSED")
.unmarshal(csv)
.to("bean:myCsvHandler?method=doHandleCsvData")
.setBody(constant("OK 2"))
.setHeader(Exchange.HTTP_RESPONSE_CODE, constant(200))
.setHeader(Exchange.CONTENT_TYPE, constant("text/html"));
When I pick up a file in folder 1 the first route, named "tenant1", starts; the same happens when I pick up a file in folder 2 and the second route, "tenant2", starts. It reads the CSV content, and the content must be written using JPA to the right tenantX (database).
I have to retrieve the routeId name in another class, but this class is instantiated before the Camel context starts, so I can't inject the context (this class, "BatchCurrentTenantIdentifierResolverImpl", belongs to the Spring database initializer). I tried to add an "of" method to set the camelContext, but I always get tenant1, even when route 2 starts, so I can't switch from one tenant to the other (a tenant is a database; I have two databases):
@Component
public class BatchTenantContext {
private static final Logger log = LoggerFactory.getLogger(BatchTenantContext.class);
// don't inject; use the of() method because injection was null
CamelContext cctx;
public BatchTenantContext(){getInstance();}
private final static BatchTenantContext instance = new BatchTenantContext();
public static BatchTenantContext getInstance(){
return instance;
}
public synchronized String get() {
if (cctx != null){
Route val = cctx.getRoute("tenant1");
if (val == null){
val = cctx.getRoute("tenant2");
if (val == null){
return "";
}
else {
return "tenant_2";
}
}
else return "tenant_1";
}
return "";
}
public synchronized void of(CamelContext ctx){
cctx = ctx;
}
public CamelContext getCamelContext()
{
return cctx;
}
}
//multitenant approach, switch from one database to another
//based on BatchTenantContext resolution..
public class BatchCurrentTenantIdentifierResolverImpl implements CurrentTenantIdentifierResolver {
static final Logger log = LoggerFactory.getLogger(BatchCurrentTenantIdentifierResolverImpl.class);
@Override
public String resolveCurrentTenantIdentifier() {
String val = BatchTenantContext.getInstance().get();
log.info("*** get tenant " + val);
return val;
}
@Override
public boolean validateExistingCurrentSessions() {
return true;
}
}
So, how do I know which route fired? Note that the class above is a singleton. Am I on the right track?
I use JPA with the Hibernate provider, configured for multitenancy like in this post: http://tech.asimio.net/2017/01/17/Multitenant-applications-using-Spring-Boot-JPA-Hibernate-and-Postgres.html
The application runs in the Spring Boot runtime environment or on the Tomcat app server.
Any ideas?
Thanks so much!
roby
I added this code:
@Configuration
@EnableConfigurationProperties({ MultiTenantAfSissProperties.class, JpaProperties.class })
@ImportResource(locations = { "classpath:applicationContent.xml" })
@EnableTransactionManagement
public class MultiTenantJpaConfiguration {
static final Logger logger = LoggerFactory.getLogger(MultiTenantJpaConfiguration.class);
@Inject
private JpaProperties jpaProperties;
@Inject
MultiTenantAFSISSProperties multiTenantAFSISSProperties; // list of the data sources linked to the tenants
@Bean
public Map<String, DataSource> dataSourceRetrieval(){
Map<String, DataSource> result = new HashMap<>();
for (DataSourceProperties dsProperties : this.multiTenantAFSISSProperties.getDataSources()) {
DataSourceBuilder factory = DataSourceBuilder
.create()
.url(dsProperties.getUrl())
.username(dsProperties.getUsername())
.password(dsProperties.getPassword())
.driverClassName(dsProperties.getDriverClassName());
result.put(dsProperties.getTenantId(), factory.build());
}
return result;
}
/**
* @return
*/
@Bean
public MultiTenantConnectionProvider multiTenantConnectionProvider(){
return new AfsissMultiTenantConnectionProviderImpl();
}
/**
* @return
*/
@Bean
public CurrentTenantIdentifierResolver currentTenantIdentifierResolver(){
return new BatchCurrentTenantIdentifierResolverImpl();
}
/**
* @param multiTenantConnectionProvider
* @param currentTenantIdentifierResolver
* @return
*/
@Bean
public LocalContainerEntityManagerFactoryBean entityManagerFactoryBean(MultiTenantConnectionProvider multiTenantConnectionProvider,
CurrentTenantIdentifierResolver currentTenantIdentifierResolver) {
Map<String, Object> hibernateProps = new LinkedHashMap<>();
hibernateProps.putAll(this.jpaProperties.getProperties());
Map<String,String> all = this.jpaProperties.getProperties();
for ( Map.Entry<String, String> prop : all.entrySet()){
System.out.println(" " + prop.getKey() + " = " + prop.getValue());
}
hibernateProps.put(Environment.MULTI_TENANT, MultiTenancyStrategy.DATABASE);
hibernateProps.put(Environment.MULTI_TENANT_CONNECTION_PROVIDER, multiTenantConnectionProvider);
hibernateProps.put(Environment.MULTI_TENANT_IDENTIFIER_RESOLVER, currentTenantIdentifierResolver);
// No dataSource is set to resulting entityManagerFactoryBean
LocalContainerEntityManagerFactoryBean result = new LocalContainerEntityManagerFactoryBean();
result.setPackagesToScan(new String[] { AfFileEntity.class.getPackage().getName() });
result.setJpaVendorAdapter(new HibernateJpaVendorAdapter());
result.setJpaPropertyMap(hibernateProps);
return result;
}
/**
* Creates the factory to obtain the entity manager.
* @param entityManagerFactoryBean
* @return
*/
@Bean
public EntityManagerFactory entityManagerFactory(LocalContainerEntityManagerFactoryBean entityManagerFactoryBean) {
return entityManagerFactoryBean.getObject();
}
/**
* Gets the transaction manager.
* @param entityManagerFactory
* @return
*/
@Bean
public PlatformTransactionManager txManager(EntityManagerFactory entityManagerFactory) {
SessionFactory sessionFactory = entityManagerFactory.unwrap(SessionFactory.class);
HibernateTransactionManager result = new HibernateTransactionManager();
result.setAutodetectDataSource(false);
result.setSessionFactory(sessionFactory);
return result;
}
}
In applicationContent.xml:
<jpa:repositories base-package="com.xxx.dao" transaction-manager-ref="txManager" />
<tx:annotation-driven transaction-manager="txManager" proxy-target-class="true" />
The class BatchCurrentTenantIdentifierResolverImpl is registered by the currentTenantIdentifierResolver() method above and is called by the Spring transaction manager every time I use the entity manager and transaction manager in the csvHandler:
@Component
@Transactional(propagation = Propagation.REQUIRED)
public class MyCsvHandler {
@Inject
AFMOVCrudRepository _entitymanagerMov; // it extends JpaRepository
@Inject
AFVINCCrudRepository _entityManagerVINC; // it extends JpaRepository
@Inject
AFFileCrudRepository _entityManagerAfFile; // it extends JpaRepository
static final Logger logger = LoggerFactory.getLogger(MyCsvHandler.class);
//save csv data on the right table on the right tenant
public void doHandleCsvData(List<List<String>> csvData) throws FileNotEvaluableException
{
//System.out.println("stampo..");
boolean status = true;
if (csvData.size() > 0){
AfFileEntity afbean = new AfFileEntity();
afbean.setNomeFile("test");
afbean.setDataImport(new java.sql.Timestamp(System.currentTimeMillis()));
afbean.setTipoFile("M");
afbean.setAfStatoFlusso("I");
_entityManagerAfFile.save(afbean);
long pkfile = afbean.getId();
logger.info("pkfile: " + pkfile);
int i = 1;
logger.info("file size:" + csvData.size());
for (List<String> rows : csvData){
//for (int j = 0; i < rows.size(); j++){
if (rows.get(2).trim().equalsIgnoreCase(...)){
MovEntity mbean = new MovEntity();
setMovFields(mbean, rows);
mbean.setAfFileId(afbean);
logger.info(String.valueOf((i++)) + " " + mbean);
_entitymanagerMov.save(mbean);
}
else if (rows.get(2).trim().equalsIgnoreCase(..) || rows.get(2).trim().equalsIgnoreCase(..) ) {
VincEntity vincBean = new VincEntity();
setVincFields(vincBean, rows);
vincBean.setAfFileId(afbean);
logger.info(String.valueOf((i++)) + " " + vincBean);
_entityManagerVINC.save(vincBean);
}
else {
status = false;
break;
}
}
if (!status) throw new FileNotEvaluableException("error file format");
}
}
private void setVincFields(VincEntity vincBean, List<String> rows) {
// vincBean.setXxx(...) and other setter methods
}
private void setMovFields(MovEntity mbean, List<String> rows) {
// mbean.setStxxx(...) and other setter methods
}
}
Something like this in your routes:
from("direct:csvprocessor1").routeId("tenant2").process((Exchange e) -> {
BatchCurrentTenantIdentifierResolverImpl.tenant.set("tenant_2");
})
.from("file:src/main/resources/data/2?move=OUT&moveFailed=REFUSED")
.unmarshal().csv()
.to("bean:myCsvHandler?method=doHandleCsvData")
.setBody(constant("OK 2"))
.setHeader(Exchange.HTTP_RESPONSE_CODE, constant(200))
.setHeader(Exchange.CONTENT_TYPE, constant("text/html"));
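The tenant1 route would mirror this; a sketch assuming the same pattern for the first folder (the direct endpoint name is illustrative):
from("direct:csvprocessor0").routeId("tenant1").process((Exchange e) -> {
// mark the current thread with the first tenant before the file is handled
BatchCurrentTenantIdentifierResolverImpl.tenant.set("tenant_1");
})
.from("file:src/main/resources/data/1?move=OUT&moveFailed=REFUSED")
.unmarshal().csv()
.to("bean:myCsvHandler?method=doHandleCsvData")
.setBody(constant("OK VB"))
.setHeader(Exchange.HTTP_RESPONSE_CODE, constant(200))
.setHeader(Exchange.CONTENT_TYPE, constant("text/html"));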
And in your BatchCurrentTenantIdentifierResolverImpl, implement it as:
public class BatchCurrentTenantIdentifierResolverImpl implements CurrentTenantIdentifierResolver {
public static ThreadLocal<String> tenant = new ThreadLocal<String>();
static final Logger log = LoggerFactory.getLogger(BatchCurrentTenantIdentifierResolverImpl.class);
@Override
public String resolveCurrentTenantIdentifier() {
String val = tenant.get();
log.info("*** get tenant " + val);
return val;
}
@Override
public boolean validateExistingCurrentSessions() {
return true;
}
}

Spring Scheduled Task running in clustered environment

I am writing an application that has a cron job that executes every 60 seconds. The application is configured to scale onto multiple instances when required. I only want to execute the task on one instance every 60 seconds (on any node). Out of the box I cannot find a solution to this, and I am surprised it has not been asked multiple times before. I am using Spring 4.1.6.
<task:scheduled-tasks>
<task:scheduled ref="beanName" method="execute" cron="0/60 * * * * *"/>
</task:scheduled-tasks>
There is a ShedLock project that serves exactly this purpose. You just annotate tasks which should be locked when executed:
@Scheduled( ... )
@SchedulerLock(name = "scheduledTaskName")
public void scheduledTask() {
// do something
}
Configure Spring and a LockProvider:
@Configuration
@EnableScheduling
@EnableSchedulerLock(defaultLockAtMostFor = "10m")
class MySpringConfiguration {
...
@Bean
public LockProvider lockProvider(DataSource dataSource) {
return new JdbcTemplateLockProvider(dataSource);
}
...
}
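JdbcTemplateLockProvider also needs a lock table; the schema has varied across ShedLock versions, but it typically looks like this (check the project README for your version):
CREATE TABLE shedlock(
name VARCHAR(64) NOT NULL,
lock_until TIMESTAMP(3) NOT NULL,
locked_at TIMESTAMP(3) NOT NULL,
locked_by VARCHAR(255) NOT NULL,
PRIMARY KEY (name)
);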
I think you have to use Quartz Clustering with a JDBC-JobStore for this purpose.
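A minimal quartz.properties sketch for a clustered JDBC-JobStore (the data source details are omitted, and the driver delegate depends on your database):
org.quartz.scheduler.instanceName = MyClusteredScheduler
# AUTO lets each node generate its own instance id
org.quartz.scheduler.instanceId = AUTO
org.quartz.jobStore.class = org.quartz.impl.jdbcjobstore.JobStoreTX
org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
org.quartz.jobStore.dataSource = myDS
# clustering: nodes coordinate through the database so each trigger fires only once
org.quartz.jobStore.isClustered = true
org.quartz.jobStore.clusterCheckinInterval = 20000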
There is another simple and robust way to safely execute a job in a cluster. You can base it on the database and execute the task only if the node is the "leader" in the cluster.
Also, when a node fails or shuts down in the cluster, another node becomes the leader.
All you have to do is create a "leader election" mechanism, and every time check whether you are the leader:
@Scheduled(cron = "*/30 * * * * *")
public void executeFailedEmailTasks() {
if (checkIfLeader()) {
final List<EmailTask> list = emailTaskService.getFailedEmailTasks();
for (EmailTask emailTask : list) {
dispatchService.sendEmail(emailTask);
}
}
}
Follow these steps:
1. Define the object and table that holds one entry per node in the cluster:
@Entity(name = "SYS_NODE")
public class SystemNode {
/** The id. */
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
private Long id;
/** The timestamp. */
@Column(name = "TIMESTAMP")
private String timestamp;
/** The ip. */
#Column(name = "IP")
private String ip;
/** The last ping. */
#Column(name = "LAST_PING")
private Date lastPing;
/** The creation date. */
@Column(name = "CREATED_AT")
private Date createdAt = new Date();
/** The leader flag. */
@Column(name = "IS_LEADER")
private Boolean isLeader = Boolean.FALSE;
public Long getId() {
return id;
}
public void setId(final Long id) {
this.id = id;
}
public String getTimestamp() {
return timestamp;
}
public void setTimestamp(final String timestamp) {
this.timestamp = timestamp;
}
public String getIp() {
return ip;
}
public void setIp(final String ip) {
this.ip = ip;
}
public Date getLastPing() {
return lastPing;
}
public void setLastPing(final Date lastPing) {
this.lastPing = lastPing;
}
public Date getCreatedAt() {
return createdAt;
}
public void setCreatedAt(final Date createdAt) {
this.createdAt = createdAt;
}
public Boolean getIsLeader() {
return isLeader;
}
public void setIsLeader(final Boolean isLeader) {
this.isLeader = isLeader;
}
@Override
public String toString() {
return "SystemNode{" +
"id=" + id +
", timestamp='" + timestamp + '\'' +
", ip='" + ip + '\'' +
", lastPing=" + lastPing +
", createdAt=" + createdAt +
", isLeader=" + isLeader +
'}';
}
}
2. Create the service that a) inserts the node in the database, and b) checks for the leader:
@Service
@Transactional
public class SystemNodeServiceImpl implements SystemNodeService, ApplicationListener {
/** The logger. */
private static final Logger LOGGER = Logger.getLogger(SystemNodeService.class);
/** The constant NO_ALIVE_NODES. */
private static final String NO_ALIVE_NODES = "Not alive nodes found in list {0}";
/** The ip. */
private String ip;
/** The system service. */
private SystemService systemService;
/** The system node repository. */
private SystemNodeRepository systemNodeRepository;
@Autowired
public void setSystemService(final SystemService systemService) {
this.systemService = systemService;
}
@Autowired
public void setSystemNodeRepository(final SystemNodeRepository systemNodeRepository) {
this.systemNodeRepository = systemNodeRepository;
}
@Override
public void pingNode() {
final SystemNode node = systemNodeRepository.findByIp(ip);
if (node == null) {
createNode();
} else {
updateNode(node);
}
}
@Override
public void checkLeaderShip() {
final List<SystemNode> allList = systemNodeRepository.findAll();
final List<SystemNode> aliveList = filterAliveNodes(allList);
SystemNode leader = findLeader(allList);
if (leader != null && aliveList.contains(leader)) {
setLeaderFlag(allList, Boolean.FALSE);
leader.setIsLeader(Boolean.TRUE);
systemNodeRepository.save(allList);
} else {
final SystemNode node = findMinNode(aliveList);
setLeaderFlag(allList, Boolean.FALSE);
node.setIsLeader(Boolean.TRUE);
systemNodeRepository.save(allList);
}
}
/**
* Returns the leader.
* @param list the list
* @return the leader
*/
private SystemNode findLeader(final List<SystemNode> list) {
for (SystemNode systemNode : list) {
if (systemNode.getIsLeader()) {
return systemNode;
}
}
return null;
}
@Override
public boolean isLeader() {
final SystemNode node = systemNodeRepository.findByIp(ip);
return node != null && node.getIsLeader();
}
@Override
public void onApplicationEvent(final ApplicationEvent applicationEvent) {
try {
ip = InetAddress.getLocalHost().getHostAddress();
} catch (Exception e) {
throw new RuntimeException(e);
}
if (applicationEvent instanceof ContextRefreshedEvent) {
pingNode();
}
}
/**
* Creates the node
*/
private void createNode() {
final SystemNode node = new SystemNode();
node.setIp(ip);
node.setTimestamp(String.valueOf(System.currentTimeMillis()));
node.setCreatedAt(new Date());
node.setLastPing(new Date());
node.setIsLeader(CollectionUtils.isEmpty(systemNodeRepository.findAll()));
systemNodeRepository.save(node);
}
/**
* Updates the node
*/
private void updateNode(final SystemNode node) {
node.setLastPing(new Date());
systemNodeRepository.save(node);
}
/**
* Returns the alive nodes.
*
* @param list the list
* @return the alive nodes
*/
private List<SystemNode> filterAliveNodes(final List<SystemNode> list) {
int timeout = systemService.getSetting(SettingEnum.SYSTEM_CONFIGURATION_SYSTEM_NODE_ALIVE_TIMEOUT, Integer.class);
final List<SystemNode> finalList = new LinkedList<>();
for (SystemNode systemNode : list) {
if (!DateUtils.hasExpired(systemNode.getLastPing(), timeout)) {
finalList.add(systemNode);
}
}
if (CollectionUtils.isEmpty(finalList)) {
LOGGER.warn(MessageFormat.format(NO_ALIVE_NODES, list));
throw new RuntimeException(MessageFormat.format(NO_ALIVE_NODES, list));
}
return finalList;
}
/**
* Finds the min name node.
*
* @param list the list
* @return the min node
*/
private SystemNode findMinNode(final List<SystemNode> list) {
SystemNode min = list.get(0);
for (SystemNode systemNode : list) {
if (systemNode.getTimestamp().compareTo(min.getTimestamp()) < 0) {
min = systemNode;
}
}
return min;
}
/**
* Sets the leader flag.
*
* @param list the list
* @param value the value
*/
private void setLeaderFlag(final List<SystemNode> list, final Boolean value) {
for (SystemNode systemNode : list) {
systemNode.setIsLeader(value);
}
}
}
3. Ping the database to signal that you are alive:
@Override
@Scheduled(cron = "0 0/5 * * * ?")
public void executeSystemNodePing() {
systemNodeService.pingNode();
}
@Override
@Scheduled(cron = "0 0/10 * * * ?")
public void executeLeaderResolution() {
systemNodeService.checkLeaderShip();
}
4. You are ready! Just check if you are the leader before executing the task:
@Override
@Scheduled(cron = "*/30 * * * * *")
public void executeFailedEmailTasks() {
if (checkIfLeader()) {
final List<EmailTask> list = emailTaskService.getFailedEmailTasks();
for (EmailTask emailTask : list) {
dispatchService.sendEmail(emailTask);
}
}
}
Batch and scheduled jobs are typically run on their own standalone servers, away from customer-facing apps, so it is not a common requirement to include a job in an application that is expected to run on a cluster. Additionally, jobs in clustered environments typically do not need to worry about other instances of the same job running in parallel, which is another reason why isolation of job instances is not a big requirement.
A simple solution would be to configure your jobs inside a Spring Profile. For example, if your current configuration is:
<beans>
<bean id="someBean" .../>
<task:scheduled-tasks>
<task:scheduled ref="someBean" method="execute" cron="0/60 * * * * *"/>
</task:scheduled-tasks>
</beans>
change it to:
<beans>
<beans profile="scheduled">
<bean id="someBean" .../>
<task:scheduled-tasks>
<task:scheduled ref="someBean" method="execute" cron="0/60 * * * * *"/>
</task:scheduled-tasks>
</beans>
</beans>
Then, launch your application on just one machine with the scheduled profile activated (-Dspring.profiles.active=scheduled).
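For example (the JVM system property must come before -jar):
java -Dspring.profiles.active=scheduled -jar my-app.jar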
If the primary server becomes unavailable for some reason, just launch another server with the profile enabled and things will continue to work just fine.
Things change if you want automatic failover for the jobs as well. Then, you will need to keep the job running on all servers and check synchronization through a common resource such as a database table, a clustered cache, a JMX variable, etc.
I'm using a database table to do the locking. Only one task at a time can insert into the table; the other one will get a DuplicateKeyException.
The insert and delete logic is handled by an aspect around the @Scheduled annotation.
I'm using Spring Boot 2.0.
@Component
@Aspect
public class SchedulerLock {
private static final Logger LOGGER = LoggerFactory.getLogger(SchedulerLock.class);
@Autowired
private JdbcTemplate jdbcTemplate;
#Around("execution(#org.springframework.scheduling.annotation.Scheduled * *(..))")
public Object lockTask(ProceedingJoinPoint joinPoint) throws Throwable {
String jobSignature = joinPoint.getSignature().toString();
try {
jdbcTemplate.update("INSERT INTO scheduler_lock (signature, date) VALUES (?, ?)", new Object[] {jobSignature, new Date()});
Object proceed = joinPoint.proceed();
jdbcTemplate.update("DELETE FROM scheduler_lock WHERE lock_signature = ?", new Object[] {jobSignature});
return proceed;
}catch (DuplicateKeyException e) {
LOGGER.warn("Job is currently locked: "+jobSignature);
return null;
}
}
}
@Component
public class EveryTenSecondJob {
@Scheduled(cron = "0/10 * * * * *")
public void taskExecution() {
System.out.println("Hello World");
}
}
CREATE TABLE scheduler_lock(
signature varchar(255) NOT NULL,
date datetime DEFAULT NULL,
PRIMARY KEY(signature)
);
dlock is designed to run tasks only once by using database indexes and constraints. You can simply do something like below:
@Scheduled(cron = "30 30 3 * * *")
@TryLock(name = "executeMyTask", owner = SERVER_NAME, lockFor = THREE_MINUTES)
public void execute() {
}
See the article about using it.
You can use Zookeeper here to elect a master instance, and the master instance will be the only one to run the scheduled job.
One implementation here is with an aspect and Apache Curator:
@SpringBootApplication
@EnableScheduling
public class Application {
private static final int PORT = 2181;
@Bean
public CuratorFramework curatorFramework() {
CuratorFramework client = CuratorFrameworkFactory.newClient("localhost:" + PORT, new ExponentialBackoffRetry(1000, 3));
client.start();
return client;
}
public static void main(String[] args) {
SpringApplication.run(Application.class, args);
}
}
Aspect class
@Aspect
@Component
public class LeaderAspect implements LeaderLatchListener{
private static final Logger log = LoggerFactory.getLogger(LeaderAspect.class);
private static final String ELECTION_ROOT = "/election";
private volatile boolean isLeader = false;
@Autowired
public LeaderAspect(CuratorFramework client) throws Exception {
LeaderLatch ll = new LeaderLatch(client , ELECTION_ROOT);
ll.addListener(this);
ll.start();
}
@Override
public void isLeader() {
isLeader = true;
log.info("Leadership granted.");
}
@Override
public void notLeader() {
isLeader = false;
log.info("Leadership revoked.");
}
#Around("#annotation(com.example.apache.curator.annotation.LeaderOnly)")
public void onlyExecuteForLeader(ProceedingJoinPoint joinPoint) {
if (!isLeader) {
log.debug("I'm not leader, skip leader-only tasks.");
return;
}
try {
log.debug("I'm leader, execute leader-only tasks.");
joinPoint.proceed();
} catch (Throwable ex) {
log.error(ex.getMessage());
}
}
}
The LeaderOnly annotation:
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
@Documented
public @interface LeaderOnly {
}
Scheduled Task
@Component
public class HelloWorld {
private static final Logger log = LoggerFactory.getLogger(HelloWorld.class);
@LeaderOnly
@Scheduled(fixedRate = 1000L)
public void sayHello() {
log.info("Hello, world!");
}
}
I am using a different approach, without the need to set up a database for managing the lock between the nodes.
The component is called FencedLock and is provided by Hazelcast.
We're using it to prevent another node from performing some operation (not necessarily linked to scheduling), but it could also be used to share a lock between nodes for a schedule.
For doing this, we just set up two helper functions that can create different lock names:
@Scheduled(cron = "${cron.expression}")
public void executeMyScheduler(){
// This can also be a member of the class.
HazelcastInstance hazelcastInstance = Hazelcast.newHazelcastInstance();
Lock lock = hazelcastInstance.getCPSubsystem().getLock("mySchedulerName");
lock.lock();
try {
// do your schedule tasks here
} finally {
// don't forget to release lock whatever happens: end of task or any exceptions.
lock.unlock();
}
}
Alternatively, you can also release the lock automatically after a delay: let's say your cron job runs every hour; you can set up an automatic release after e.g. 50 minutes like this:
@Scheduled(cron = "${cron.expression}")
public void executeMyScheduler(){
// This can also be a member of the class.
HazelcastInstance hazelcastInstance = Hazelcast.newHazelcastInstance();
Lock lock = hazelcastInstance.getCPSubsystem().getLock("mySchedulerName");
if (lock.tryLock(50, TimeUnit.MINUTES)) {
try {
// do your schedule tasks here
} finally {
// don't forget to release lock whatever happens: end of task or any exceptions.
lock.unlock();
}
} else {
// warning: lock has been released by timeout!
}
}
Note that this Hazelcast component works very well in a cloud-based environment (e.g. k8s clusters), without the need to pay for an extra database.
Here is what you need to configure:
// We need to specify the name, otherwise it can conflict with internal Hazelcast beans
@Bean("hazelcastInstance")
public HazelcastInstance hazelcastInstance() {
Config config = new Config();
config.setClusterName(hazelcastProperties.getGroup().getName());
NetworkConfig networkConfig = config.getNetworkConfig();
networkConfig.setPortAutoIncrement(false);
networkConfig.getJoin().getKubernetesConfig().setEnabled(hazelcastProperties.isNetworkEnabled())
.setProperty("service-dns", hazelcastProperties.getServiceDNS())
.setProperty("service-port", hazelcastProperties.getServicePort().toString());
config.setProperty("hazelcast.metrics.enabled", "false");
networkConfig.getJoin().getMulticastConfig().setEnabled(false);
return Hazelcast.newHazelcastInstance(config);
}
HazelcastProperties is the @ConfigurationProperties object mapped to the properties.
For local testing you can just disable the network config by using these properties in your local profile:
hazelcast:
network-enabled: false
service-port: 5701
group:
name: your-hazelcast-group-name
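A minimal sketch of that properties class, assuming only the fields used in the bean above (the prefix and types are inferred from the YAML):
import org.springframework.boot.context.properties.ConfigurationProperties;

@ConfigurationProperties(prefix = "hazelcast")
public class HazelcastProperties {

    private boolean networkEnabled; // hazelcast.network-enabled
    private String serviceDNS;      // hazelcast.service-dns
    private Integer servicePort;    // hazelcast.service-port
    private Group group = new Group();

    public boolean isNetworkEnabled() { return networkEnabled; }
    public void setNetworkEnabled(boolean networkEnabled) { this.networkEnabled = networkEnabled; }
    public String getServiceDNS() { return serviceDNS; }
    public void setServiceDNS(String serviceDNS) { this.serviceDNS = serviceDNS; }
    public Integer getServicePort() { return servicePort; }
    public void setServicePort(Integer servicePort) { this.servicePort = servicePort; }
    public Group getGroup() { return group; }
    public void setGroup(Group group) { this.group = group; }

    public static class Group {
        private String name; // hazelcast.group.name
        public String getName() { return name; }
        public void setName(String name) { this.name = name; }
    }
}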
You could use an embeddable scheduler like db-scheduler to accomplish this. It has persistent executions and uses a simple optimistic locking mechanism to guarantee execution by a single node.
Example code for how the use-case can be achieved:
RecurringTask<Void> recurring1 = Tasks.recurring("my-task-name", FixedDelay.of(Duration.ofSeconds(60)))
.execute((taskInstance, executionContext) -> {
System.out.println("Executing " + taskInstance.getTaskAndInstance());
});
final Scheduler scheduler = Scheduler
.create(dataSource)
.startTasks(recurring1)
.build();
scheduler.start();
I am using a free HTTP service called kJob-Manager. https://kjob-manager.ciesielski-systems.de/
The advantage is that you don't create a new table in your database and also don't need any database connection, because it is just an HTTP request.
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.LinkedHashMap;
import org.apache.tomcat.util.json.JSONParser;
import org.apache.tomcat.util.json.ParseException;
import org.junit.jupiter.api.Test;
public class KJobManagerTest {
@Test
public void example() throws IOException, ParseException {
String data = "{\"token\":\"<API-Token>\"}";
URL url = new URL("https://kjob-manager.ciesielski-systems.de/api/ticket/<JOB-ID>");
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
connection.setRequestProperty("Content-Type", "application/json");
connection.setRequestMethod("POST");
connection.setDoOutput(true);
connection.getOutputStream().write(data.getBytes(StandardCharsets.UTF_8));
JSONParser jsonParser = new JSONParser(connection.getInputStream());
LinkedHashMap<String, LinkedHashMap<String, Object>> result = (LinkedHashMap<String, LinkedHashMap<String, Object>>) jsonParser.parse();
if ((boolean) result.get("ticket").get("open")) {
System.out.println("This replica could run the cronjob!");
} else {
System.out.println("This replica has nothing to do!");
}
}
}
The Spring context is not clustered, so managing a task in a distributed application is a little bit difficult; you need a system supporting JGroups to synchronize the state and let one task take priority to execute the action. Or you could use an EJB context to manage a clustered HA singleton service, as in a JBoss HA environment:
https://developers.redhat.com/quickstarts/eap/cluster-ha-singleton/?referrer=jbd
Or you could use a clustered cache and a shared lock resource between the services, where the first service to take the lock performs the action; or implement your own JGroups channel to let your services communicate and perform the action on one node.

How to display a link on a particular rel as an array even if there is only one link

for (Person person : company.getPersons()) {
resource.add(linkTo(methodOn(PersonController.class).view(person.getId()))
.withRel("persons"));
}
I want to return an array of links for the "persons" rel. It's all OK if I have multiple persons, but if I have only a single person it returns a single element, and my client code that expects an array fails.
Not possible in Spring HATEOAS 0.18; we overloaded the built-in serializer to account for this. It was very nasty.
Technically, a client should interpret rel : {} as rel : [{}] to be HAL compliant... but they rarely do.
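To illustrate, here is the same "persons" rel rendered both ways (abbreviated HAL; the href is hypothetical). Single-object form:
{ "_links": { "persons": { "href": "/persons/1" } } }
and the array form an array-expecting client wants:
{ "_links": { "persons": [ { "href": "/persons/1" } ] } }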
You have to remove and override the built-in HATEOAS one; we did it like this, but this effectively removes all other converters:
@Configuration
public class WebMVCConfig extends WebMvcConfigurerAdapter {
private static final String DELEGATING_REL_PROVIDER_BEAN_NAME = "_relProvider";
private static final String LINK_DISCOVERER_REGISTRY_BEAN_NAME = "_linkDiscovererRegistry";
private static final String HAL_OBJECT_MAPPER_BEAN_NAME = "_halObjectMapper";
public WebMVCConfig(){
}
@Autowired
private ListableBeanFactory beanFactory;
@Override
public void configureMessageConverters(List<HttpMessageConverter<?>> converters) {
//Need to override some behaviour in the HAL Serializer...so let's do that
CurieProvider curieProvider = getCurieProvider(beanFactory);
RelProvider relProvider = beanFactory.getBean(DELEGATING_REL_PROVIDER_BEAN_NAME, RelProvider.class);
ObjectMapper halObjectMapper = beanFactory.getBean(HAL_OBJECT_MAPPER_BEAN_NAME, ObjectMapper.class);
halObjectMapper.registerModule(new MultiLinkAwareJackson2HalModule());
halObjectMapper.setHandlerInstantiator(new MultiLinkAwareJackson2HalModule.MultiLinkAwareHalHandlerInstantiator(relProvider, curieProvider));
MappingJackson2HttpMessageConverter halConverter = new TypeConstrainedMappingJackson2HttpMessageConverter(ResourceSupport.class);
halConverter.setSupportedMediaTypes(Arrays.asList(HAL_JSON));
halConverter.setObjectMapper(halObjectMapper);
converters.add(halConverter);
}
private static CurieProvider getCurieProvider(BeanFactory factory) {
try {
return factory.getBean(CurieProvider.class);
} catch (NoSuchBeanDefinitionException e) {
return null;
}
}
}
Overriding the serializer is really ugly business... maybe we should have just built a new one from scratch:
/*
* Copyright 2012-2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.fasterxml.jackson.databind.introspect.Annotated;
import com.fasterxml.jackson.databind.ser.std.MapSerializer;
import com.fasterxml.jackson.databind.type.TypeFactory;
import com.google.common.collect.ImmutableSet;
import org.springframework.hateoas.hal.*;
import java.io.IOException;
import java.util.*;
import org.springframework.hateoas.Link;
import org.springframework.hateoas.Links;
import org.springframework.hateoas.RelProvider;
import org.springframework.hateoas.ResourceSupport;
import com.fasterxml.jackson.core.JsonGenerationException;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.BeanProperty;
import com.fasterxml.jackson.databind.JavaType;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.JsonSerializer;
import com.fasterxml.jackson.databind.SerializationConfig;
import com.fasterxml.jackson.databind.SerializerProvider;
import javax.xml.bind.annotation.XmlElement;
/**
* Jackson 2 module implementation to render {@link org.springframework.hateoas.Link} and {@link org.springframework.hateoas.ResourceSupport} instances in HAL compatible JSON.
*
* Extend this class to make it possible for a relationship to be serialized as an array even if there is only 1 link.
* This is done in the OptionalListJackson2Serializer::serialize method.
*
* Relationships to force as arrays are defined in relsToForceAsAnArray
*/
public class MultiLinkAwareJackson2HalModule extends Jackson2HalModule {
private static final long serialVersionUID = 7806951456457932384L;
private static final ImmutableSet<String> relsToForceAsAnArray = ImmutableSet.copyOf(Arrays.asList(
ContractConstants.REL_PROMOTION_TARGET,
ContractConstants.REL_PROFILE,
ContractConstants.REL_IMAGE_FLAG,
ContractConstants.REL_IMAGE,
ContractConstants.REL_IMAGE_PRIMARY,
ContractConstants.REL_IMAGE_SECONDARY,
ContractConstants.REL_IMAGE_MENU,
ContractConstants.REL_ITEM
));
private static abstract class MultiLinkAwareResourceSupportMixin extends ResourceSupport {
@Override
@XmlElement(name = "link")
@JsonProperty("_links")
//here's the only diff from org.springframework.hateoas.hal.ResourceSupportMixin
//we use a different HalLinkListSerializer
@JsonSerialize(include = JsonSerialize.Inclusion.NON_EMPTY, using = MultiLinkAwareHalLinkListSerializer.class)
@JsonDeserialize(using = MultiLinkAwareJackson2HalModule.HalLinkListDeserializer.class)
public abstract List<Link> getLinks();
}
public MultiLinkAwareJackson2HalModule() {
super();
//NOTE: super calls setMixInAnnotation(Link.class, LinkMixin.class);
//you must not override this as this is how Spring-HATEOAS determines if a
//Hal converter has been registered for not.
//If it determines a Hal converter has not been registered, it will register it's own
//that will override this one
//Use customized ResourceSupportMixin to use our LinkListSerializer
setMixInAnnotation(ResourceSupport.class, MultiLinkAwareResourceSupportMixin.class);
}
public static class MultiLinkAwareHalLinkListSerializer extends Jackson2HalModule.HalLinkListSerializer {
private final BeanProperty property;
private final CurieProvider curieProvider;
private final Set<String> relsAsMultilink;
public MultiLinkAwareHalLinkListSerializer(BeanProperty property, CurieProvider curieProvider, Set<String> relsAsMultilink) {
super(property, curieProvider);
this.property = property;
this.curieProvider = curieProvider;
this.relsAsMultilink = relsAsMultilink;
}
@Override
public void serialize(List<Link> value, JsonGenerator jgen, SerializerProvider provider) throws IOException,
JsonGenerationException {
// sort links according to their relation
Map<String, List<Object>> sortedLinks = new LinkedHashMap<String, List<Object>>();
List<Link> links = new ArrayList<Link>();
boolean prefixingRequired = curieProvider != null;
boolean curiedLinkPresent = false;
for (Link link : value) {
String rel = prefixingRequired ? curieProvider.getNamespacedRelFrom(link) : link.getRel();
if (!link.getRel().equals(rel)) {
curiedLinkPresent = true;
}
if (sortedLinks.get(rel) == null) {
sortedLinks.put(rel, new ArrayList<Object>());
}
links.add(link);
sortedLinks.get(rel).add(link);
}
if (prefixingRequired && curiedLinkPresent) {
ArrayList<Object> curies = new ArrayList<Object>();
curies.add(curieProvider.getCurieInformation(new Links(links)));
sortedLinks.put("curies", curies);
}
TypeFactory typeFactory = provider.getConfig().getTypeFactory();
JavaType keyType = typeFactory.uncheckedSimpleType(String.class);
JavaType valueType = typeFactory.constructCollectionType(ArrayList.class, Object.class);
JavaType mapType = typeFactory.constructMapType(HashMap.class, keyType, valueType);
//CHANGE HERE: the only thing we are changing is the List Serializer
//shame there's not a better way to override this very specific behaviour
//without copy-pasting the whole class
MapSerializer serializer = MapSerializer.construct(new String[] {}, mapType, true, null,
provider.findKeySerializer(keyType, null), new MultiLinkAwareOptionalListJackson2Serializer(property, relsAsMultilink), null);
serializer.serialize(sortedLinks, jgen, provider);
}
public MultiLinkAwareHalLinkListSerializer withForcedRels(String[] relationships) {
ImmutableSet<String> relsToForce = ImmutableSet.<String>builder().addAll(this.relsAsMultilink).add(relationships).build();
return new MultiLinkAwareHalLinkListSerializer(this.property, this.curieProvider, relsToForce);
}
@Override
public JsonSerializer<?> createContextual(SerializerProvider provider, BeanProperty property)
throws JsonMappingException {
return new MultiLinkAwareHalLinkListSerializer(property, curieProvider, this.relsAsMultilink);
}
}
public static class MultiLinkAwareOptionalListJackson2Serializer extends Jackson2HalModule.OptionalListJackson2Serializer {
private final BeanProperty property;
private final Map<Class<?>, JsonSerializer<Object>> serializers;
private final Set<String> relsAsMultilink;
public MultiLinkAwareOptionalListJackson2Serializer(BeanProperty property, Set<String> relsAsMultilink) {
super(property);
this.property = property;
this.serializers = new HashMap<Class<?>, JsonSerializer<Object>>();
this.relsAsMultilink = relsAsMultilink;
}
@Override
public void serialize(Object value, JsonGenerator jgen, SerializerProvider provider) throws IOException,
JsonGenerationException {
List<?> list = (List<?>) value;
if (list.isEmpty()) {
return;
}
if(list.get(0) instanceof Link) {
Link link = (Link) list.get(0);
String rel = link.getRel();
if (list.size() > 1 || relsAsMultilink.contains(rel)) {
jgen.writeStartArray();
serializeContents(list.iterator(), jgen, provider);
jgen.writeEndArray();
} else {
serializeContents(list.iterator(), jgen, provider);
}
}
}
private void serializeContents(Iterator<?> value, JsonGenerator jgen, SerializerProvider provider)
throws IOException, JsonGenerationException {
while (value.hasNext()) {
Object elem = value.next();
if (elem == null) {
provider.defaultSerializeNull(jgen);
} else {
getOrLookupSerializerFor(elem.getClass(), provider).serialize(elem, jgen, provider);
}
}
}
private JsonSerializer<Object> getOrLookupSerializerFor(Class<?> type, SerializerProvider provider)
throws JsonMappingException {
JsonSerializer<Object> serializer = serializers.get(type);
if (serializer == null) {
serializer = provider.findValueSerializer(type, property);
serializers.put(type, serializer);
}
return serializer;
}
@Override
public JsonSerializer<?> createContextual(SerializerProvider provider, BeanProperty property)
throws JsonMappingException {
return new MultiLinkAwareOptionalListJackson2Serializer(property, relsAsMultilink);
}
}
public static class MultiLinkAwareHalHandlerInstantiator extends Jackson2HalModule.HalHandlerInstantiator {
private final MultiLinkAwareHalLinkListSerializer linkListSerializer;
public MultiLinkAwareHalHandlerInstantiator(RelProvider resolver, CurieProvider curieProvider) {
super(resolver, curieProvider, true);
this.linkListSerializer = new MultiLinkAwareHalLinkListSerializer(null, curieProvider, relsToForceAsAnArray);
}
@Override
public JsonSerializer<?> serializerInstance(SerializationConfig config, Annotated annotated, Class<?> serClass) {
if(serClass.equals(MultiLinkAwareHalLinkListSerializer.class)){
if (annotated.hasAnnotation(ForceMultiLink.class)) {
return this.linkListSerializer.withForcedRels(annotated.getAnnotation(ForceMultiLink.class).value());
} else {
return this.linkListSerializer;
}
} else {
return super.serializerInstance(config, annotated, serClass);
}
}
}
}
That ForceMultiLink stuff was an additional thing we ended up needing, where on some resource classes a rel needed to be multi and on others it did not... so it looks like this:
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface ForceMultiLink {
String[] value();
}
You use it to annotate the getLinks() method in your resource class.
I have a workaround for this issue that is along similar lines to Chris' answer. The main difference is that I did not extend Jackson2HalModule, but created a new handler-instantiator and set it as the handler-instantiator for a new instance of Jackson2HalModule that I create myself. I hope Spring HATEOAS will eventually support this functionality natively; I have a pull request that attempts to do this. Here's how I implemented my workaround:
Step 1: Create the mixin class:
public abstract class HalLinkListMixin {
@JsonProperty("_links") @JsonSerialize(using = HalLinkListSerializer.class)
public abstract List<Link> getLinks();
}
This mixin class will associate the HalLinkListSerializer (shown later) with the links property.
Step 2: Create a container class that holds the rels whose link representations should always be an array of link objects:
public class HalMultipleLinkRels {
private final Set<String> rels;
public HalMultipleLinkRels(String... rels) {
this.rels = new HashSet<String>(Arrays.asList(rels));
}
public Set<String> getRels() {
return Collections.unmodifiableSet(rels);
}
}
Step 3: Create our new serializer that will override Spring HATEOAS's link-list serializer:
public class HalLinkListSerializer extends ContainerSerializer<List<Link>> implements ContextualSerializer {
private final BeanProperty property;
private CurieProvider curieProvider;
private HalMultipleLinkRels halMultipleLinkRels;
public HalLinkListSerializer() {
this(null, null, new HalMultipleLinkRels());
}
public HalLinkListSerializer(CurieProvider curieProvider, HalMultipleLinkRels halMultipleLinkRels) {
this(null, curieProvider, halMultipleLinkRels);
}
public HalLinkListSerializer(BeanProperty property, CurieProvider curieProvider, HalMultipleLinkRels halMultipleLinkRels) {
super(List.class, false);
this.property = property;
this.curieProvider = curieProvider;
this.halMultipleLinkRels = halMultipleLinkRels;
}
@Override
public void serialize(List<Link> value, JsonGenerator jgen, SerializerProvider provider) throws IOException, JsonGenerationException {
// sort links according to their relation
Map<String, List<Object>> sortedLinks = new LinkedHashMap<>();
List<Link> links = new ArrayList<>();
boolean prefixingRequired = curieProvider != null;
boolean curiedLinkPresent = false;
for (Link link : value) {
String rel = prefixingRequired ? curieProvider.getNamespacedRelFrom(link) : link.getRel();
if (!link.getRel().equals(rel)) {
curiedLinkPresent = true;
}
if (sortedLinks.get(rel) == null) {
sortedLinks.put(rel, new ArrayList<>());
}
links.add(link);
sortedLinks.get(rel).add(link);
}
if (prefixingRequired && curiedLinkPresent) {
ArrayList<Object> curies = new ArrayList<>();
curies.add(curieProvider.getCurieInformation(new Links(links)));
sortedLinks.put("curies", curies);
}
TypeFactory typeFactory = provider.getConfig().getTypeFactory();
JavaType keyType = typeFactory.uncheckedSimpleType(String.class);
JavaType valueType = typeFactory.constructCollectionType(ArrayList.class, Object.class);
JavaType mapType = typeFactory.constructMapType(HashMap.class, keyType, valueType);
MapSerializer serializer = MapSerializer.construct(new String[]{}, mapType, true, null,
provider.findKeySerializer(keyType, null), new ListJackson2Serializer(property, halMultipleLinkRels), null);
serializer.serialize(sortedLinks, jgen, provider);
}
@Override
public JavaType getContentType() {
return null;
}
@Override
public JsonSerializer<?> getContentSerializer() {
return null;
}
@Override
public boolean hasSingleElement(List<Link> value) {
return value.size() == 1;
}
@Override
protected ContainerSerializer<?> _withValueTypeSerializer(TypeSerializer vts) {
return null;
}
@Override
public JsonSerializer<?> createContextual(SerializerProvider prov, BeanProperty property) throws JsonMappingException {
return new HalLinkListSerializer(property, curieProvider, halMultipleLinkRels);
}
private static class ListJackson2Serializer extends ContainerSerializer<Object> implements ContextualSerializer {
private final BeanProperty property;
private final Map<Class<?>, JsonSerializer<Object>> serializers = new HashMap<>();
private final HalMultipleLinkRels halMultipleLinkRels;
public ListJackson2Serializer() {
this(null, null);
}
public ListJackson2Serializer(BeanProperty property, HalMultipleLinkRels halMultipleLinkRels) {
super(List.class, false);
this.property = property;
this.halMultipleLinkRels = halMultipleLinkRels;
}
@Override
public void serialize(Object value, JsonGenerator jgen, SerializerProvider provider) throws IOException, JsonGenerationException {
List<?> list = (List<?>) value;
if (list.isEmpty()) {
return;
}
if (list.size() == 1) {
Object element = list.get(0);
if (element instanceof Link) {
Link link = (Link) element;
if (halMultipleLinkRels.getRels().contains(link.getRel())) {
jgen.writeStartArray();
serializeContents(list.iterator(), jgen, provider);
jgen.writeEndArray();
return;
}
}
serializeContents(list.iterator(), jgen, provider);
return;
}
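// Two or more links under the same rel always render as a JSON array.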
jgen.writeStartArray();
serializeContents(list.iterator(), jgen, provider);
jgen.writeEndArray();
}
@Override
public JavaType getContentType() {
return null;
}
@Override
public JsonSerializer<?> getContentSerializer() {
return null;
}
@Override
public boolean hasSingleElement(Object value) {
return false;
}
@Override
protected ContainerSerializer<?> _withValueTypeSerializer(TypeSerializer vts) {
throw new UnsupportedOperationException("not implemented");
}
@Override
public JsonSerializer<?> createContextual(SerializerProvider prov, BeanProperty property) throws JsonMappingException {
return new ListJackson2Serializer(property, halMultipleLinkRels);
}
private void serializeContents(Iterator<?> value, JsonGenerator jgen, SerializerProvider provider) throws IOException, JsonGenerationException {
while (value.hasNext()) {
Object elem = value.next();
if (elem == null) {
provider.defaultSerializeNull(jgen);
} else {
getOrLookupSerializerFor(elem.getClass(), provider).serialize(elem, jgen, provider);
}
}
}
private JsonSerializer<Object> getOrLookupSerializerFor(Class<?> type, SerializerProvider provider) throws JsonMappingException {
JsonSerializer<Object> serializer = serializers.get(type);
if (serializer == null) {
serializer = provider.findValueSerializer(type, property);
serializers.put(type, serializer);
}
return serializer;
}
}
}
This class unfortunately duplicates some of Spring HATEOAS's logic, but it's not too bad. The key difference is that instead of using OptionalListJackson2Serializer, it uses the nested ListJackson2Serializer, which forces a rel's links to be rendered as a JSON array whenever that rel appears in the container of rel overrides (HalMultipleLinkRels).
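To make the effect concrete, here is a hypothetical registration sketch. The serializer above only ever calls halMultipleLinkRels.getRels().contains(rel), so this assumes getRels() returns a mutable collection; adapt it to however HalMultipleLinkRels is actually populated:
// Hypothetical setup (assumes getRels() is mutable): force every link under
// the "orders" rel to render as a JSON array, even when there is only one.
HalMultipleLinkRels halMultipleLinkRels = new HalMultipleLinkRels();
halMultipleLinkRels.getRels().add("orders");
// Registered rel, single link:   "orders": [ { "href": "/customers/1/orders/42" } ]
// Unregistered rel, single link: "orders": { "href": "/customers/1/orders/42" }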
Step 4: Create a custom handler-instantiator:
public class HalHandlerInstantiator extends HandlerInstantiator {
private final Jackson2HalModule.HalHandlerInstantiator halHandlerInstantiator;
private final Map<Class<?>, JsonSerializer<?>> serializerMap = new HashMap<>();
public HalHandlerInstantiator(RelProvider relProvider, CurieProvider curieProvider, HalMultipleLinkRels halMultipleLinkRels) {
this(relProvider, curieProvider, halMultipleLinkRels, true);
}
public HalHandlerInstantiator(RelProvider relProvider, CurieProvider curieProvider, HalMultipleLinkRels halMultipleLinkRels, boolean enforceEmbeddedCollections) {
halHandlerInstantiator = new Jackson2HalModule.HalHandlerInstantiator(relProvider, curieProvider, enforceEmbeddedCollections);
serializerMap.put(HalLinkListSerializer.class, new HalLinkListSerializer(curieProvider, halMultipleLinkRels));
}
@Override
public JsonDeserializer<?> deserializerInstance(DeserializationConfig config, Annotated annotated, Class<?> deserClass) {
return halHandlerInstantiator.deserializerInstance(config, annotated, deserClass);
}
@Override
public KeyDeserializer keyDeserializerInstance(DeserializationConfig config, Annotated annotated, Class<?> keyDeserClass) {
return halHandlerInstantiator.keyDeserializerInstance(config, annotated, keyDeserClass);
}
@Override
public JsonSerializer<?> serializerInstance(SerializationConfig config, Annotated annotated, Class<?> serClass) {
if(serializerMap.containsKey(serClass)) {
return serializerMap.get(serClass);
} else {
return halHandlerInstantiator.serializerInstance(config, annotated, serClass);
}
}
@Override
public TypeResolverBuilder<?> typeResolverBuilderInstance(MapperConfig<?> config, Annotated annotated, Class<?> builderClass) {
return halHandlerInstantiator.typeResolverBuilderInstance(config, annotated, builderClass);
}
@Override
public TypeIdResolver typeIdResolverInstance(MapperConfig<?> config, Annotated annotated, Class<?> resolverClass) {
return halHandlerInstantiator.typeIdResolverInstance(config, annotated, resolverClass);
}
}
This instantiator will control the lifecycle for our custom serializer. It maintains an internal instance of Jackson2HalModule.HalHandlerInstantiator, and delegates to that instance for all other serializers.
Step 5: Put it all together:
@Configuration
public class ApplicationConfiguration {
private static final String HAL_OBJECT_MAPPER_BEAN_NAME = "_halObjectMapper";
private static final String DELEGATING_REL_PROVIDER_BEAN_NAME = "_relProvider";
@Autowired
private BeanFactory beanFactory;
private static CurieProvider getCurieProvider(BeanFactory factory) {
try {
return factory.getBean(CurieProvider.class);
} catch (NoSuchBeanDefinitionException e) {
return null;
}
}
@Bean
public ObjectMapper objectMapper() {
CurieProvider curieProvider = getCurieProvider(beanFactory);
RelProvider relProvider = beanFactory.getBean(DELEGATING_REL_PROVIDER_BEAN_NAME, RelProvider.class);
ObjectMapper halObjectMapper = beanFactory.getBean(HAL_OBJECT_MAPPER_BEAN_NAME, ObjectMapper.class);
//Create a new instance of Jackson2HalModule
SimpleModule module = new Jackson2HalModule();
//Provide the mix-in class so that we can override the serializer for links with our custom serializer
module.setMixInAnnotation(ResourceSupport.class, HalLinkListMixin.class);
//Register the module in the object mapper
halObjectMapper.registerModule(module);
//Set the handler instantiator on the mapper to our custom handler-instantiator
halObjectMapper.setHandlerInstantiator(new HalHandlerInstantiator(relProvider, curieProvider, halMultipleLinkRels()));
return halObjectMapper;
}
...
}
Don't forget the "self" link that HAL requires on every resource; given that, it is actually not that common for a resource to end up with only one link.
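For completeness, a typical way to add it with this generation of Spring HATEOAS looks like the sketch below (OrderController, getOrder and Order are hypothetical names):
import static org.springframework.hateoas.mvc.ControllerLinkBuilder.linkTo;
import static org.springframework.hateoas.mvc.ControllerLinkBuilder.methodOn;

// Attach the mandatory "self" link before returning the resource,
// so the HAL output always contains _links.self.
Resource<Order> resource = new Resource<>(order);
resource.add(linkTo(methodOn(OrderController.class).getOrder(order.getId())).withSelfRel());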

java.lang.NoSuchMethodError: org.hibernate.SessionFactory.openSession()

For some reason I get the following exception when using Spring Batch in combination with Hibernate 4.
java.lang.NoSuchMethodError: org.hibernate.SessionFactory.openSession()Lorg/hibernate/classic/Session;
at org.springframework.batch.item.database.HibernateItemReaderHelper.createQuery(HibernateItemReaderHelper.java:152)
at org.springframework.batch.item.database.HibernateItemReaderHelper.getForwardOnlyCursor(HibernateItemReaderHelper.java:122)
at ....
I upgraded to the latest Spring Batch 2.1.8.RELEASE and Spring 3.1.1.RELEASE, which are supposed to work with Hibernate 4. I looked into the source, and it seems the helper class is already written against the SessionFactory API used in Hibernate 4:
package org.springframework.batch.item.database;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.hibernate.Query;
import org.hibernate.ScrollMode;
import org.hibernate.ScrollableResults;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.hibernate.StatelessSession;
import org.springframework.batch.item.database.orm.HibernateQueryProvider;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;
/**
* Internal shared state helper for hibernate readers managing sessions and
* queries.
*
* @author Dave Syer
*
*/
public class HibernateItemReaderHelper<T> implements InitializingBean {
private SessionFactory sessionFactory;
private String queryString = "";
private String queryName = "";
private HibernateQueryProvider queryProvider;
private boolean useStatelessSession = true;
private StatelessSession statelessSession;
private Session statefulSession;
/**
* @param queryName name of a hibernate named query
*/
public void setQueryName(String queryName) {
this.queryName = queryName;
}
/**
* @param queryString HQL query string
*/
public void setQueryString(String queryString) {
this.queryString = queryString;
}
/**
* @param queryProvider Hibernate query provider
*/
public void setQueryProvider(HibernateQueryProvider queryProvider) {
this.queryProvider = queryProvider;
}
/**
* Can be set only in uninitialized state.
*
* @param useStatelessSession <code>true</code> to use
* {@link StatelessSession} <code>false</code> to use standard hibernate
* {@link Session}
*/
public void setUseStatelessSession(boolean useStatelessSession) {
Assert.state(statefulSession == null && statelessSession == null,
"The useStatelessSession flag can only be set before a session is initialized.");
this.useStatelessSession = useStatelessSession;
}
/**
* @param sessionFactory hibernate session factory
*/
public void setSessionFactory(SessionFactory sessionFactory) {
this.sessionFactory = sessionFactory;
}
public void afterPropertiesSet() throws Exception {
Assert.state(sessionFactory != null, "A SessionFactory must be provided");
if (queryProvider == null) {
Assert.notNull(sessionFactory, "session factory must be set");
Assert.state(StringUtils.hasText(queryString) ^ StringUtils.hasText(queryName),
"queryString or queryName must be set");
}
// making sure that the appropriate (Hibernate) query provider is set
else {
Assert.state(queryProvider != null, "Hibernate query provider must be set");
}
}
/**
* Get a cursor over all of the results, with the forward-only flag set.
*
* @param fetchSize the fetch size to use retrieving the results
* @param parameterValues the parameter values to use (or null if none).
*
* @return a forward-only {@link ScrollableResults}
*/
public ScrollableResults getForwardOnlyCursor(int fetchSize, Map<String, Object> parameterValues) {
Query query = createQuery();
if (parameterValues != null) {
query.setProperties(parameterValues);
}
return query.setFetchSize(fetchSize).scroll(ScrollMode.FORWARD_ONLY);
}
/**
* Open appropriate type of hibernate session and create the query.
*/
public Query createQuery() {
if (useStatelessSession) {
if (statelessSession == null) {
statelessSession = sessionFactory.openStatelessSession();
}
if (queryProvider != null) {
queryProvider.setStatelessSession(statelessSession);
}
else {
if (StringUtils.hasText(queryName)) {
return statelessSession.getNamedQuery(queryName);
}
else {
return statelessSession.createQuery(queryString);
}
}
}
else {
if (statefulSession == null) {
statefulSession = sessionFactory.openSession();
}
if (queryProvider != null) {
queryProvider.setSession(statefulSession);
}
else {
if (StringUtils.hasText(queryName)) {
return statefulSession.getNamedQuery(queryName);
}
else {
return statefulSession.createQuery(queryString);
}
}
}
// If queryProvider is set use it to create a query
return queryProvider.createQuery();
}
/**
* Scroll through the results up to the item specified.
*
* @param cursor the results to scroll over
*/
public void jumpToItem(ScrollableResults cursor, int itemIndex, int flushInterval) {
for (int i = 0; i < itemIndex; i++) {
cursor.next();
if (i % flushInterval == 0 && !useStatelessSession) {
statefulSession.clear(); // Clears in-memory cache
}
}
}
/**
* Close the open session (stateful or otherwise).
*/
public void close() {
if (statelessSession != null) {
statelessSession.close();
statelessSession = null;
}
if (statefulSession != null) {
statefulSession.close();
statefulSession = null;
}
}
/**
* Read a page of data, clearing the existing session (if necessary) first,
* and creating a new session before executing the query.
*
* @param page the page to read (starting at 0)
* @param pageSize the size of the page or maximum number of items to read
* @param fetchSize the fetch size to use
* @param parameterValues the parameter values to use (if any, otherwise
* null)
* @return a collection of items
*/
public Collection<? extends T> readPage(int page, int pageSize, int fetchSize, Map<String, Object> parameterValues) {
clear();
Query query = createQuery();
if (parameterValues != null) {
query.setProperties(parameterValues);
}
@SuppressWarnings("unchecked")
List<T> result = query.setFetchSize(fetchSize).setFirstResult(page * pageSize).setMaxResults(pageSize).list();
return result;
}
/**
* Clear the session if stateful.
*/
public void clear() {
if (statefulSession != null) {
statefulSession.clear();
}
}
}
So the question is: why does it still try to call the old Hibernate 3 version of the method even though the newest libraries are on the classpath? Does anybody have an idea why this could be happening?
We finally got it to work by recompiling Spring Batch against Hibernate 4. It seems the released Spring Batch 2.1.8 binaries are compiled against Hibernate 3 and are therefore not binary-compatible with Hibernate 4.
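The stack trace actually explains why recompiling helps: a JVM method reference embeds the return type, and Hibernate changed the return type of openSession() between versions 3 and 4. A minimal illustration (not Spring Batch code):
import org.hibernate.Session;
import org.hibernate.SessionFactory;

public class OpenSessionCheck {
    // Hibernate 3.x declared: org.hibernate.classic.Session openSession()
    // Hibernate 4.x declares: org.hibernate.Session openSession()
    // Bytecode compiled against Hibernate 3 references
    // openSession()Lorg/hibernate/classic/Session; -- a descriptor that no
    // longer exists in hibernate-core 4.x, hence the NoSuchMethodError even
    // though the same *source* compiles cleanly against either version.
    public static Session open(SessionFactory factory) {
        return factory.openSession(); // resolves to the Hibernate 4 descriptor
    }
}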
This is what's in my WAR:
activation-1.1.jar
amqp-client-2.7.1.jar
antlr-2.7.7.jar
aopalliance-1.0.jar
aspectjrt-1.5.0.jar
aspectjweaver-1.5.2.jar
aspectjweaver-1.6.9.jar
avalon-framework-4.1.3.jar
billing-commons-1.9.0-SNAPSHOT.jar
billing-core-1.9.0-SNAPSHOT.jar
cglib-nodep-2.2.2.jar
classpath.txt
commons-batch-2.0.0-SNAPSHOT.jar
commons-beanutils-1.7.0.jar
commons-cli-1.1.jar
commons-codec-1.2.jar
commons-customer-experience-1.3.0.jar
commons-domain-1.0.0.jar
commons-email-1.1.jar
commons-email-1.9.1-SNAPSHOT.jar
commons-hibernate-3.0.0-SNAPSHOT.jar
commons-http-1.2.1.jar
commons-httpclient-3.1.jar
commons-httpclient-contrib-3.1.jar
commons-io-1.3.1.jar
commons-lang-2.1.jar
commons-logging-1.1.jar
commons-logging-1.2.0.jar
commons-monitoring-1.0.0.jar
commons-property-3.3.0.jar
commons-rabbitmq-1.1.1.jar
commons-spring-agent-2.5.0-SNAPSHOT.jar
commons-util-1.6.1.jar
customer-inventory-commons-1.4.0.jar
customer-inventory-core-1.4.0.jar
cxf-api-2.3.2.jar
cxf-common-schemas-2.3.2.jar
cxf-common-utilities-2.3.2.jar
cxf-rt-bindings-xml-2.3.2.jar
cxf-rt-core-2.3.2.jar
cxf-rt-frontend-jaxrs-2.3.2.jar
cxf-rt-transports-common-2.3.2.jar
cxf-rt-transports-http-2.3.2.jar
dom4j-1.6.1.jar
generic-monitoring-console-api-1.1.0.jar
geronimo-javamail_1.4_spec-1.7.1.jar
hibernate-commons-annotations-4.0.1.Final.jar
hibernate-core-4.1.4.Final.jar
hibernate-entitymanager-4.1.4.Final.jar
hibernate-jpa-2.0-api-1.0.1.Final.jar
hibernate-validator-4.3.0.Final.jar
javassist-3.15.0-GA.jar
jaxb-impl-2.1.13.jar
jboss-logging-3.1.0.GA.jar
jboss-transaction-api_1.1_spec-1.0.0.Final.jar
jettison-1.1.jar
jms-1.1.jar
jsr250-api-1.0.jar
jsr311-api-1.1.1.jar
log4j-1.2.15.jar
logkit-1.0.1.jar
mail-1.4.jar
neethi-2.0.4.jar
orchestration-api-1.7.0-20120820.120350-6.jar
quartz-1.5.2.jar
simplestuff-0.9.jar
singleview-api-commons-1.1.0.jar
singleview-api-core-1.1.0.jar
slf4j-api-1.5.6.jar
slf4j-simple-1.5.6.jar
spring-aop-3.1.1.RELEASE.jar
spring-asm-3.1.1.RELEASE.jar
spring-aspects-3.1.1.RELEASE.jar
spring-batch-core-2.1.8.RELEASE.jar
spring-batch-infrastructure-2.1.8.RELEASE.jar
spring-beans-3.1.1.RELEASE.jar
spring-context-3.1.1.RELEASE.jar
spring-context-support-3.1.1.RELEASE.jar
spring-core-3.1.1.RELEASE.jar
spring-expression-3.1.1.RELEASE.jar
spring-jdbc-3.1.1.RELEASE.jar
spring-orm-3.1.1.RELEASE.jar
spring-tx-3.1.1.RELEASE.jar
spring-web-3.1.1.RELEASE.jar
spring-webmvc-3.1.1.RELEASE.jar
stax2-api-3.0.2.jar
validation-api-1.0.0.GA.jar
woodstox-core-asl-4.0.8.jar
wsdl4j-1.6.2.jar
XmlSchema-1.4.7.jar
xpp3_min-1.1.4c.jar
xstream-1.3.1.jar
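There is only one hibernate-core jar in that list, which points back at the binary-compatibility explanation above rather than at a duplicate-jar problem. Still, a quick way to confirm at runtime which jar a class was actually loaded from (for example, to rule out a Hibernate 3 jar leaking in from the container's shared libraries) is:
// Prints the location of the jar that SessionFactory was loaded from; if it is
// not the hibernate-core-4.1.4.Final.jar listed above, check the server classpath.
System.out.println(org.hibernate.SessionFactory.class
    .getProtectionDomain().getCodeSource().getLocation());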
