Spring Data Redis SET command supports EX and NX

Does Spring Data Redis support the SET command with options?
My use case:
127.0.0.1:6379> set lock.foo RUNNING NX EX 20
Then check whether Redis returns OK or (nil).

Use RedisTemplate#execute(RedisCallback<T> action), demo:
@Autowired
private RedisTemplate redisTemplate;

public void test() {
    String redisKey = "lock.foo";
    String value = "RUNNING";
    long expire = 20L;
    Boolean result = (Boolean) redisTemplate.execute((RedisCallback<Boolean>) connection -> {
        byte[] redisKeyBytes = redisTemplate.getKeySerializer().serialize(redisKey);
        byte[] valueBytes = redisTemplate.getValueSerializer().serialize(value);
        Expiration expiration = Expiration.from(expire, TimeUnit.SECONDS);
        return connection.set(redisKeyBytes, valueBytes, expiration, RedisStringCommands.SetOption.SET_IF_ABSENT);
    });
    System.out.println("result = " + result);
}
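Note: on Spring Data Redis 2.1 or later, ValueOperations exposes the same atomic SET ... NX EX directly, so you don't need to drop to the connection level. A minimal sketch, reusing the key, value, and timeout from the demo above:
// Requires Spring Data Redis 2.1+: issues SET lock.foo RUNNING EX 20 NX in one atomic call
Boolean acquired = redisTemplate.opsForValue()
        .setIfAbsent("lock.foo", "RUNNING", 20, TimeUnit.SECONDS);
System.out.println("acquired = " + acquired); // true if the lock was taken, false if the key already existed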
RedisTemplate config:
@Configuration
public class RedisConfig {

    @Bean
    public RedisSerializer<String> keySerializer() {
        return new StringRedisSerializer();
    }

    @Bean
    public RedisSerializer<Object> valueSerializer() {
        return new GenericJackson2JsonRedisSerializer();
    }

    @Bean
    public RedisTemplate redisTemplate(RedisTemplate redisTemplate, RedisSerializer keySerializer, RedisSerializer valueSerializer) {
        // set key serializer
        redisTemplate.setKeySerializer(keySerializer);
        redisTemplate.setHashKeySerializer(keySerializer);
        // set value serializer
        redisTemplate.setValueSerializer(valueSerializer);
        redisTemplate.setHashValueSerializer(valueSerializer);
        return redisTemplate;
    }
}

I couldn't find a solution among the Spring template value operations, so I did a 'native' execute on the connection via org.springframework.data.redis.connection.StringRedisConnection#execute(java.lang.String, java.lang.String...).
It is then up to me to take care of marshalling the arguments and processing the result.
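For illustration, a minimal sketch of that 'native' route; the stringRedisTemplate variable is an assumed StringRedisTemplate (not shown in the post), and the command name and arguments are passed through verbatim:
// Issues SET lock.foo RUNNING NX EX 20 through the raw connection
Object reply = stringRedisTemplate.execute((RedisCallback<Object>) connection ->
        ((StringRedisConnection) connection).execute("SET", "lock.foo", "RUNNING", "NX", "EX", "20"));
// Redis answers "OK" when the key was set, null when it already existed
System.out.println("reply = " + reply);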

Related

more than one 'primary' service instance suppliers found during load balancing (spring boot/cloud)

I'm currently updating from Spring Boot 2.2.x to 2.6.x on top of legacy code; it's a big jump, so there were multiple changes. I'm now running into a problem with load balancing through an API gateway. I'll apologize in advance for the wall of code to come; I'll put the point of failure at the bottom.
When I send an API request, I get the following error:
more than one 'primary' bean found among candidates: [zookeeperDiscoveryClientServiceInstanceListSupplier, serviceInstanceListSupplier, retryAwareDiscoveryClientServiceInstanceListSupplier]
It seems that the zookeeperDiscovery and retryAware suppliers are loaded alongside the default serviceInstanceListSupplier, which has @Primary on it and which I thought would take precedence over the other ones. I assume I must be doing something wrong due to changes in the newer version. Here is the relevant code in question:
@Configuration
@LoadBalancerClients(defaultConfiguration = ClientConfiguration.class)
public class WebClientConfiguration {

    @Bean
    @Qualifier("microserviceWebClient")
    @ConditionalOnMissingBean(name = "microserviceWebClient")
    public WebClient microserviceWebClient(@Qualifier("microserviceWebClientBuilder") WebClient.Builder builder) {
        return builder.build();
    }

    @Bean
    @Qualifier("microserviceWebClientBuilder")
    @ConditionalOnMissingBean(name = "microserviceWebClientBuilder")
    @LoadBalanced
    public WebClient.Builder microserviceWebClientBuilder() {
        return WebClient.builder();
    }

    @Bean
    @Primary
    public ReactorLoadBalancerExchangeFilterFunction reactorLoadBalancerExchangeFilterFunction(
            ReactiveLoadBalancer.Factory<ServiceInstance> loadBalancerFactory) {
        // the transformer is currently null; there wasn't a transformer before the upgrade
        return new CustomExchangeFilterFunction(loadBalancerFactory, transformer);
    }
}
There are also some Feign-client-related configs which I will omit, since they are not (or shouldn't be) playing a role in this problem:
public class ClientConfiguration {

    /**
     * The property key within the feign clients configuration context for the feign client name.
     */
    public static final String FEIGN_CLIENT_NAME_PROPERTY = "feign.client.name";

    public ClientConfiguration() {
    }

    // Creates a new BiPredicate for shouldClose. This will be used to determine if HTTP Connections should be automatically closed or not.
    @Bean
    @ConditionalOnMissingBean
    public BiPredicate<Response, Type> shouldClose() {
        return (Response response, Type type) -> {
            if (type instanceof Class) {
                Class<?> currentClass = (Class<?>) type;
                return (null == AnnotationUtils.getAnnotation(currentClass, EnableResponseStream.class));
            }
            return true;
        };
    }

    // Creates a custom Decoder
    @Bean
    public Decoder createCustomDecoder(
            ObjectFactory<HttpMessageConverters> converters, BiPredicate<Response, Type> shouldClose
    ) {
        return new CustomDecoder(converters, shouldClose);
    }

    @Bean
    @Qualifier("loadBalancerName")
    public String loadBalancerName(PropertyResolver propertyResolver) {
        String name = propertyResolver.getProperty(FEIGN_CLIENT_NAME_PROPERTY);
        if (StringUtils.hasText(name)) {
            // we are in a feign context
            return name;
        }
        // we are in a LoadBalancerClientFactory context
        name = propertyResolver.getProperty(LoadBalancerClientFactory.PROPERTY_NAME);
        Assert.notNull(name, "Could not find a load balancer name within the configuration context!");
        return name;
    }

    @Bean
    public ReactorServiceInstanceLoadBalancer reactorServiceInstanceLoadBalancer(
            BeanFactory beanFactory, @Qualifier("loadBalancerName") String loadBalancerName
    ) {
        return new CustomRoundRobinLoadBalancer(
                beanFactory.getBeanProvider(ServiceInstanceListSupplier.class),
                loadBalancerName
        );
    }

    @Bean
    @Primary
    public ServiceInstanceListSupplier serviceInstanceListSupplier(
            @Qualifier("filter") Predicate<ServiceInstance> filter,
            DiscoveryClient discoveryClient,
            Environment environment,
            @Qualifier("loadBalancerName") String loadBalancerName
    ) {
        // add service name to environment if necessary
        if (environment.getProperty(LoadBalancerClientFactory.PROPERTY_NAME) == null) {
            StandardEnvironment wrapped = new StandardEnvironment();
            if (environment instanceof ConfigurableEnvironment) {
                ((ConfigurableEnvironment) environment).getPropertySources()
                        .forEach(s -> wrapped.getPropertySources().addLast(s));
            }
            Map<String, Object> additionalProperties = new HashMap<>();
            additionalProperties.put(LoadBalancerClientFactory.PROPERTY_NAME, loadBalancerName);
            wrapped.getPropertySources().addLast(new MapPropertySource(loadBalancerName, additionalProperties));
            environment = wrapped;
        }
        return new FilteringInstanceListSupplier(filter, discoveryClient, environment);
    }
}
There was a change in the ExchangeFilterFunction constructor, but as far as I can tell, it accepts that empty transformer. I don't know if it's supposed to:
public class CustomExchangeFilterFunction extends ReactorLoadBalancerExchangeFilterFunction {

    private static final ThreadLocal<ClientRequest> REQUEST_HOLDER = new ThreadLocal<>();

    // I think it's wrong but I don't know what to do here
    private static List<LoadBalancerClientRequestTransformer> transformersList;

    private final Factory<ServiceInstance> loadBalancerFactory;

    public CustomExchangeFilterFunction(Factory<ServiceInstance> loadBalancerFactory) {
        this(loadBalancerFactory, transformersList);
    }

    // according to docs, but I don't know where and if I need to use this
    @Bean
    public LoadBalancerClientRequestTransformer transformer() {
        return new LoadBalancerClientRequestTransformer() {
            @Override
            public ClientRequest transformRequest(ClientRequest request, ServiceInstance instance) {
                return ClientRequest.from(request)
                        .header(instance.getInstanceId())
                        .build();
            }
        };
    }

    public CustomExchangeFilterFunction(Factory<ServiceInstance> loadBalancerFactory, List<LoadBalancerClientRequestTransformer> transformersList) {
        super(loadBalancerFactory, transformersList); // the changed constructor
        this.loadBalancerFactory = loadBalancerFactory;
    }

    @Override
    public Mono<ClientResponse> filter(ClientRequest request, ExchangeFunction next) {
        // put the current request into the thread context - ugly, but couldn't find a better way to access the request
        // within the choose method without reimplementing nearly everything
        REQUEST_HOLDER.set(request);
        try {
            return super.filter(request, next);
        } finally {
            REQUEST_HOLDER.remove();
        }
    }

    // used to be an override, but the function has changed
    // code execution doesn't even get this far yet
    protected Mono<Response<ServiceInstance>> choose(String serviceId) {
        ReactiveLoadBalancer<ServiceInstance> loadBalancer = loadBalancerFactory.getInstance(serviceId);
        if (loadBalancer == null) {
            return Mono.just(new EmptyResponse());
        }
        ClientRequest request = REQUEST_HOLDER.get();
        // this might be null, if the underlying implementation changed and this method is no longer executed
        // in the same thread as the filter method
        Assert.notNull(request, "request must not be null, underlying implementation seems to have changed");
        return choose(loadBalancer, filter); // 'filter' comes from the surrounding configuration (not shown in the post)
    }

    protected Mono<Response<ServiceInstance>> choose(
            ReactiveLoadBalancer<ServiceInstance> loadBalancer,
            Predicate<ServiceInstance> filter
    ) {
        return Mono.from(loadBalancer.choose(new DefaultRequest<>(filter)));
    }
}
There were pretty big changes in the CustomExchangeFilterFunction, but the current execution doesn't even get there. It fails here, in .getIfAvailable(...):
public class CustomRoundRobinLoadBalancer implements ReactorServiceInstanceLoadBalancer {

    private static final int DEFAULT_SEED_POSITION = 1000;

    private final ObjectProvider<ServiceInstanceListSupplier> serviceInstanceListSupplierProvider;
    private final String serviceId;
    private final int seedPosition;
    private final AtomicInteger position;
    private final Map<String, AtomicInteger> positionsForVersions = new HashMap<>();

    public CustomRoundRobinLoadBalancer(
            ObjectProvider<ServiceInstanceListSupplier> serviceInstanceListSupplierProvider,
            String serviceId
    ) {
        this(serviceInstanceListSupplierProvider, serviceId, new Random().nextInt(DEFAULT_SEED_POSITION));
    }

    public CustomRoundRobinLoadBalancer(
            ObjectProvider<ServiceInstanceListSupplier> serviceInstanceListSupplierProvider,
            String serviceId,
            int seedPosition
    ) {
        Assert.notNull(serviceInstanceListSupplierProvider, "serviceInstanceListSupplierProvider must not be null");
        Assert.notNull(serviceId, "serviceId must not be null");
        this.serviceInstanceListSupplierProvider = serviceInstanceListSupplierProvider;
        this.serviceId = serviceId;
        this.seedPosition = seedPosition;
        this.position = new AtomicInteger(seedPosition);
    }

    @Override
    // we have no choice but to use the raw type Request here, because this method overrides another one with this signature
    public Mono<Response<ServiceInstance>> choose(@SuppressWarnings("rawtypes") Request request) {
        // fails here!
        ServiceInstanceListSupplier supplier = serviceInstanceListSupplierProvider
                .getIfAvailable(NoopServiceInstanceListSupplier::new);
        return supplier.get().next().map((List<ServiceInstance> instances) -> getInstanceResponse(instances, request));
    }
}
Edit: after some deeper stack tracing, it seems that it does go into the CustomExchangeFilterFunction and invokes the constructor with super(loadBalancerFactory, transformer).
I found the problem, or at least a workaround. I was using @LoadBalancerClients because I thought it would just set the same config for all clients that way (even if I technically only have one at the moment). I changed it to @LoadBalancerClient and it suddenly worked. I don't quite understand why this made a difference, but it did!
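For reference, a minimal sketch of that change; the service name "my-service" is a placeholder, not from the original post:
@Configuration
// before: @LoadBalancerClients(defaultConfiguration = ClientConfiguration.class)
@LoadBalancerClient(name = "my-service", configuration = ClientConfiguration.class)
public class WebClientConfiguration {
    // beans as above
}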

Spring Cache Abstraction with Hazelcast Doesn't Evict Key From Cache

With the following configuration, my return object is cached, but when I try to evict a key manually it doesn't work.
@Configuration
@EnableCaching
public class HazelCastConfiguration {

    @Bean
    public HazelcastCacheManager hazelcastCacheManager() {
        return new HazelcastCacheManager(Hazelcast.newHazelcastInstance(hazelcastConfig()));
    }

    @Bean
    public Config hazelcastConfig() {
        return new Config()
                .setInstanceName("hazelcast-instance")
                .addMapConfig(new MapConfig()
                        .setName("myCache")
                        .setMaxSizeConfig(new MaxSizeConfig())
                        .setEvictionPolicy(EvictionPolicy.LRU)
                        .setStatisticsEnabled(true)
                        .setTimeToLiveSeconds(-1));
    }
}
Cached method:
@Override
@Cacheable(value = "myCache", unless = "#result == null", key = "{#someString, #someLong, #someInteger}")
public List<MyReturnObject> methodWithCachedResults(String someString, Long someLong, Integer someInteger) {
    // my logic
}
A sample helper method:
public void evictKey(String aString, Long aLong, Integer anInteger) {
    IMap<Object, Object> hazelcastCache = Hazelcast.getHazelcastInstanceByName("hazelcast-instance").getMap("myCache");
    hazelcastCache.evict(Arrays.asList(aString, aLong, anInteger));
    logger.info("{}", hazelcastCache.keySet());
}
When I trigger the method above, it still logs the key even though I forced the key to be evicted.
The result is the same when I try with the CacheManager:
@Autowired
private HazelcastCacheManager cacheManager;

public void evictKey(String aString, Long aLong, Integer anInteger) {
    cacheManager.getCache("myCache").evict(Arrays.asList(aString, aLong, anInteger));
}
However, if I try this, it clears the whole cache, which is exactly what it says it does:
public void evictKey(String aString, Long aLong, Integer anInteger) {
    IMap<Object, Object> hazelcastCache = Hazelcast.getHazelcastInstanceByName("hazelcast-instance").getMap("myCache");
    hazelcastCache.clear();
}
By the way, checking keySet().contains(Arrays.asList(...)) returns true.

It's far from obvious, but there are two implementations of List in play here.
@Cacheable will create an instance of java.util.ArrayList.
Arrays.asList will create an instance of java.util.Arrays$ArrayList.
The two lists are equal as far as List.equals is concerned (which is why keySet().contains(...) returns true), but Hazelcast compares keys by their serialized binary form by default, so the two classes produce different map keys. This should make it clearer:
public void evictKey(String aString, Long aLong, Integer anInteger) {
    IMap<Object, Object> hazelcastCache = Hazelcast.getHazelcastInstanceByName("hazelcast-instance").getMap("myCache");
    java.util.List<Object> keyToEvict = Arrays.asList(aString, aLong, anInteger);
    boolean success = hazelcastCache.evict(keyToEvict);
    logger.info("Evicted {}, {} == {}", keyToEvict, keyToEvict.getClass(), success);
    for (Object key : hazelcastCache.keySet()) {
        logger.info("Remaining key {}, {}", key, key.getClass());
    }
}
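Building on that, a minimal sketch of a working eviction (my addition, assuming the key stored by @Cacheable really is a plain java.util.ArrayList as described above):
public void evictKey(String aString, Long aLong, Integer anInteger) {
    IMap<Object, Object> hazelcastCache = Hazelcast.getHazelcastInstanceByName("hazelcast-instance").getMap("myCache");
    // copying into a java.util.ArrayList matches the class of the key created by @Cacheable,
    // so the serialized forms line up and the evict actually hits the entry
    List<Object> keyToEvict = new ArrayList<>(Arrays.asList(aString, aLong, anInteger));
    boolean evicted = hazelcastCache.evict(keyToEvict);
    logger.info("Evicted {}: {}", keyToEvict, evicted);
}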

Spring Cloud OpenFeign Failed to Create Dynamic Query Parameters

Spring Cloud OpenFeign can't create dynamic query parameters. It throws the exception below because SpringMvcContract tries to find the RequestParam value attribute, which doesn't exist.
java.lang.IllegalStateException: RequestParam.value() was empty on parameter 0
@RequestMapping(method = RequestMethod.GET, value = "/orders")
Pageable<Order> searchOrder2(@RequestParam CustomObject customObject);
I tried using @QueryMap instead of @RequestParam, but @QueryMap does not generate query parameters.
By the way, a @RequestParam Map<String, Object> params method parameter works fine for generating dynamic query parameters.
But I want to use a custom object whose attributes the feign client can use to generate dynamic query parameters.
From Spring Cloud OpenFeign Docs:
Spring Cloud OpenFeign provides an equivalent #SpringQueryMap annotation, which is used to annotate a POJO or Map parameter as a query parameter map
So your code should be:
@RequestMapping(method = RequestMethod.GET, value = "/orders")
Pageable<Order> searchOrder2(@SpringQueryMap @ModelAttribute CustomObject customObject);
spring-cloud-starter-feign has an open issue for supporting a POJO as a request parameter. Therefore I used a request interceptor that takes the object from the feign method and creates the query part of the URL from its fields. Thanks to @charlesvhe.
public class DynamicQueryRequestInterceptor implements RequestInterceptor {

    private static final Logger LOGGER = LoggerFactory.getLogger(DynamicQueryRequestInterceptor.class);
    private static final String EMPTY = "";

    @Autowired
    private ObjectMapper objectMapper;

    @Override
    public void apply(RequestTemplate template) {
        if ("GET".equals(template.method()) && Objects.nonNull(template.body())) {
            try {
                // read back the serialized method argument and rebuild it as query parameters
                JsonNode jsonNode = objectMapper.readTree(template.body());
                template.body(null);
                Map<String, Collection<String>> queries = new HashMap<>();
                buildQuery(jsonNode, EMPTY, queries);
                template.queries(queries);
            } catch (IOException e) {
                LOGGER.error("IOException occurred while trying to create http query");
            }
        }
    }

    private void buildQuery(JsonNode jsonNode, String path, Map<String, Collection<String>> queries) {
        if (!jsonNode.isContainerNode()) {
            // leaf node: record its value under the accumulated path
            if (jsonNode.isNull()) {
                return;
            }
            Collection<String> values = queries.computeIfAbsent(path, k -> new ArrayList<>());
            values.add(jsonNode.asText());
            return;
        }
        if (jsonNode.isArray()) {
            // arrays contribute repeated values under the same parameter name
            Iterator<JsonNode> it = jsonNode.elements();
            while (it.hasNext()) {
                buildQuery(it.next(), path, queries);
            }
        } else {
            // nested objects contribute dotted paths, e.g. address.city
            Iterator<Map.Entry<String, JsonNode>> it = jsonNode.fields();
            while (it.hasNext()) {
                Map.Entry<String, JsonNode> entry = it.next();
                if (StringUtils.hasText(path)) {
                    buildQuery(entry.getValue(), path + "." + entry.getKey(), queries);
                } else {
                    buildQuery(entry.getValue(), entry.getKey(), queries);
                }
            }
        }
    }
}
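For the interceptor to take effect it has to be registered as a bean in the Feign configuration, which also makes the @Autowired ObjectMapper injection work. A minimal sketch; the config class name is illustrative:
@Configuration
public class FeignInterceptorConfig {

    // Feign picks up RequestInterceptor beans from its configuration context
    @Bean
    public DynamicQueryRequestInterceptor dynamicQueryRequestInterceptor() {
        return new DynamicQueryRequestInterceptor();
    }
}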

camelContext attribute discriminator for tenant resolver, using JPA multitenant and Camel routeId

I'd like to ask how I can use the CamelContext to get the name of the route fired by an event; more in detail, how I can use some discriminator attribute x in the CamelContext for a predicate decision (if x = 1 then .. else ..).
For example:
I have this kind of route:
// this route uses the first database
from("direct:csvprocessor1")
    .routeId("tenant1")
    .from("file:src/main/resources/data/1?move=OUT&moveFailed=REFUSED")
    .unmarshal(csv)
    .to("bean:myCsvHandler?method=doHandleCsvData")
    .setBody(constant("OK VB"))
    .setHeader(Exchange.HTTP_RESPONSE_CODE, constant(200))
    .setHeader(Exchange.CONTENT_TYPE, constant("text/html"));
and this other route:
// this route uses tenant2, the second database
from("direct:csvprocessor1")
    .routeId("tenant2")
    .from("file:src/main/resources/data/2?move=OUT&moveFailed=REFUSED")
    .unmarshal(csv)
    .to("bean:myCsvHandler?method=doHandleCsvData")
    .setBody(constant("OK 2"))
    .setHeader(Exchange.HTTP_RESPONSE_CODE, constant(200))
    .setHeader(Exchange.CONTENT_TYPE, constant("text/html"));
When I pick up a file in folder 1, the first route, named "tenant1", starts; the same happens with folder 2, where the second route, tenant2, starts. It reads the CSV content, and the content must be written via JPA to the right tenantX (database).
I have to retrieve the route id name in another class, but this class is instantiated before the CamelContext starts, so I can't inject the context (this class, BatchCurrentTenantIdentifierResolverImpl, belongs to the Spring database initialization). I tried to add a method "of" to set the CamelContext, but I get tenant1 only, even when route 2 starts, so I can't switch from one tenant to the other (a tenant is a database; I have two databases):
@Component
public class BatchTenantContext {

    private static final Logger log = LoggerFactory.getLogger(BatchTenantContext.class);

    // don't inject; use method "of" because injection was null
    CamelContext cctx;

    public BatchTenantContext() { getInstance(); }

    private final static BatchTenantContext instance = new BatchTenantContext();

    public static BatchTenantContext getInstance() {
        return instance;
    }

    public synchronized String get() {
        if (cctx != null) {
            Route val = cctx.getRoute("tenant1");
            if (val == null) {
                val = cctx.getRoute("tenant2");
                if (val == null) {
                    return "";
                } else {
                    return "tenant_2";
                }
            } else {
                return "tenant_1";
            }
        }
        return "";
    }

    public synchronized void of(CamelContext ctx) {
        cctx = ctx;
    }

    public CamelContext getCamelContext() {
        return cctx;
    }
}
// multitenant approach, switch from one database to another
// based on BatchTenantContext resolution
public class BatchCurrentTenantIdentifierResolverImpl implements CurrentTenantIdentifierResolver {

    static final Logger log = LoggerFactory.getLogger(BatchCurrentTenantIdentifierResolverImpl.class);

    @Override
    public String resolveCurrentTenantIdentifier() {
        String val = BatchTenantContext.getInstance().get();
        log.info("*** get tenant " + val);
        return val;
    }

    @Override
    public boolean validateExistingCurrentSessions() {
        return true;
    }
}
So, how do I know which route fired? Note that the class above is a singleton. Am I on the right track?
I use JPA with the Hibernate provider, configured with the multitenant configuration from this post: http://tech.asimio.net/2017/01/17/Multitenant-applications-using-Spring-Boot-JPA-Hibernate-and-Postgres.html
The application runs in the Spring Boot runtime environment or on a Tomcat app server.
Any ideas?
Thanks so much!
roby
I added this code:
@Configuration
@EnableConfigurationProperties({ MultiTenantAfSissProperties.class, JpaProperties.class })
@ImportResource(locations = { "classpath:applicationContent.xml" })
@EnableTransactionManagement
public class MultiTenantJpaConfiguration {

    static final Logger logger = LoggerFactory.getLogger(MultiTenantJpaConfiguration.class);

    @Inject
    private JpaProperties jpaProperties;

    @Inject
    MultiTenantAFSISSProperties multiTenantAFSISSProperties; // list of the data sources bound to the tenants

    @Bean
    public Map<String, DataSource> dataSourceRetrieval() {
        Map<String, DataSource> result = new HashMap<>();
        for (DataSourceProperties dsProperties : this.multiTenantAFSISSProperties.getDataSources()) {
            DataSourceBuilder factory = DataSourceBuilder
                    .create()
                    .url(dsProperties.getUrl())
                    .username(dsProperties.getUsername())
                    .password(dsProperties.getPassword())
                    .driverClassName(dsProperties.getDriverClassName());
            result.put(dsProperties.getTenantId(), factory.build());
        }
        return result;
    }

    @Bean
    public MultiTenantConnectionProvider multiTenantConnectionProvider() {
        return new AfsissMultiTenantConnectionProviderImpl();
    }

    @Bean
    public CurrentTenantIdentifierResolver currentTenantIdentifierResolver() {
        return new BatchCurrentTenantIdentifierResolverImpl();
    }

    @Bean
    public LocalContainerEntityManagerFactoryBean entityManagerFactoryBean(MultiTenantConnectionProvider multiTenantConnectionProvider,
            CurrentTenantIdentifierResolver currentTenantIdentifierResolver) {
        Map<String, Object> hibernateProps = new LinkedHashMap<>();
        hibernateProps.putAll(this.jpaProperties.getProperties());
        Map<String, String> all = this.jpaProperties.getProperties();
        for (Map.Entry<String, String> prop : all.entrySet()) {
            System.out.println(" " + prop.getKey() + " = " + prop.getValue());
        }
        hibernateProps.put(Environment.MULTI_TENANT, MultiTenancyStrategy.DATABASE);
        hibernateProps.put(Environment.MULTI_TENANT_CONNECTION_PROVIDER, multiTenantConnectionProvider);
        hibernateProps.put(Environment.MULTI_TENANT_IDENTIFIER_RESOLVER, currentTenantIdentifierResolver);
        // No dataSource is set on the resulting entityManagerFactoryBean
        LocalContainerEntityManagerFactoryBean result = new LocalContainerEntityManagerFactoryBean();
        result.setPackagesToScan(new String[] { AfFileEntity.class.getPackage().getName() });
        result.setJpaVendorAdapter(new HibernateJpaVendorAdapter());
        result.setJpaPropertyMap(hibernateProps);
        return result;
    }

    // creates the factory used to obtain the entity manager
    @Bean
    public EntityManagerFactory entityManagerFactory(LocalContainerEntityManagerFactoryBean entityManagerFactoryBean) {
        return entityManagerFactoryBean.getObject();
    }

    // gets the transaction manager
    @Bean
    public PlatformTransactionManager txManager(EntityManagerFactory entityManagerFactory) {
        SessionFactory sessionFactory = entityManagerFactory.unwrap(SessionFactory.class);
        HibernateTransactionManager result = new HibernateTransactionManager();
        result.setAutodetectDataSource(false);
        result.setSessionFactory(sessionFactory);
        return result;
    }
}
In applicationContent.xml:
<jpa:repositories base-package="com.xxx.dao" transaction-manager-ref="txManager" />
<tx:annotation-driven transaction-manager="txManager" proxy-target-class="true" />
The class BatchCurrentTenantIdentifierResolverImpl is called in the currentTenantIdentifierResolver() method above by the Spring transaction manager every time I use the entity manager and transaction manager in MyCsvHandler:
@Component
@Transactional(propagation = Propagation.REQUIRED)
public class MyCsvHandler {

    @Inject
    AFMOVCrudRepository _entitymanagerMov; // it extends JpaRepository
    @Inject
    AFVINCCrudRepository _entityManagerVINC; // it extends JpaRepository
    @Inject
    AFFileCrudRepository _entityManagerAfFile; // it extends JpaRepository

    static final Logger logger = LoggerFactory.getLogger(MyCsvHandler.class);

    // save csv data to the right table on the right tenant
    public void doHandleCsvData(List<List<String>> csvData) throws FileNotEvaluableException {
        boolean status = true;
        if (csvData.size() > 0) {
            AfFileEntity afbean = new AfFileEntity();
            afbean.setNomeFile("test");
            afbean.setDataImport(new java.sql.Timestamp(System.currentTimeMillis()));
            afbean.setTipoFile("M");
            afbean.setAfStatoFlusso("I");
            _entityManagerAfFile.save(afbean);
            long pkfile = afbean.getId();
            logger.info("pkfile: " + pkfile);
            int i = 1;
            logger.info("file size:" + csvData.size());
            for (List<String> rows : csvData) {
                if (rows.get(2).trim().equalsIgnoreCase(...)) {
                    MovEntity mbean = new MovEntity();
                    setMovFields(mbean, rows);
                    mbean.setAfFileId(afbean);
                    logger.info(String.valueOf((i++)) + " " + mbean);
                    _entitymanagerMov.save(mbean);
                } else if (rows.get(2).trim().equalsIgnoreCase(..) || rows.get(2).trim().equalsIgnoreCase(..)) {
                    VincEntity vincBean = new VincEntity();
                    setVincFields(vincBean, rows);
                    vincBean.setAfFileId(afbean);
                    logger.info(String.valueOf((i++)) + " " + vincBean);
                    _entityManagerVINC.save(vincBean);
                } else {
                    status = false;
                    break;
                }
            }
            if (!status) throw new FileNotEvaluableException("error file format");
        }
    }

    private void setVincFields(VincEntity vincBean, List<String> rows) {
        // vincBean.setXXX(...) and other setters
    }

    private void setMovFields(MovEntity mbean, List<String> rows) {
        // mbean.setStxxx(...) and other setters
    }
}
Something like this in your routes:
from("direct:csvprocessor1").routeId("tenant2")
    .process((Exchange e) -> {
        BatchCurrentTenantIdentifierResolverImpl.tenant.set("tenant_2");
    })
    .from("file:src/main/resources/data/2?move=OUT&moveFailed=REFUSED")
    .unmarshal().csv()
    .to("bean:myCsvHandler?method=doHandleCsvData")
    .setBody(constant("OK 2"))
    .setHeader(Exchange.HTTP_RESPONSE_CODE, constant(200))
    .setHeader(Exchange.CONTENT_TYPE, constant("text/html"));
And in your BatchCurrentTenantIdentifierResolverImpl, implement it as:
public class BatchCurrentTenantIdentifierResolverImpl implements CurrentTenantIdentifierResolver {

    public static ThreadLocal<String> tenant = new ThreadLocal<>();
    static final Logger log = LoggerFactory.getLogger(BatchCurrentTenantIdentifierResolverImpl.class);

    @Override
    public String resolveCurrentTenantIdentifier() {
        String val = tenant.get();
        log.info("*** get tenant " + val);
        return val;
    }

    @Override
    public boolean validateExistingCurrentSessions() {
        return true;
    }
}
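As a variation on the same idea (my sketch, not part of the original answer), the processor can derive the tenant from the route id itself via Exchange.getFromRouteId(), so each route doesn't need a hardcoded tenant string; the tenant1 -> tenant_1 mapping below assumes the naming convention used in the question:
from("direct:csvprocessor1").routeId("tenant1")
    .process(e -> {
        // route id "tenant1" becomes tenant identifier "tenant_1"
        String tenantId = e.getFromRouteId().replace("tenant", "tenant_");
        BatchCurrentTenantIdentifierResolverImpl.tenant.set(tenantId);
    })
    .to("bean:myCsvHandler?method=doHandleCsvData");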

Multiple Caffeine LoadingCaches added to Spring CaffeineCacheManager

I'm looking to add several distinct LoadingCaches to a Spring CacheManager, but I don't see how this is possible using CaffeineCacheManager. It appears that only a single loader is possible for refreshing content, whereas I need a separate loader for each cache. Is it possible to add multiple loading caches to a Spring cache manager? If so, how?
CaffeineCacheManager cacheManager = new CaffeineCacheManager();

LoadingCache<String, Optional<Edition>> loadingCache1 =
        Caffeine.newBuilder()
                .maximumSize(150)
                .refreshAfterWrite(5, TimeUnit.MINUTES)
                .build(test -> this.testRepo.find(test));

LoadingCache<String, Optional<Edition>> loadingCache2 =
        Caffeine.newBuilder()
                .maximumSize(150)
                .refreshAfterWrite(5, TimeUnit.MINUTES)
                .build(test2 -> this.testRepo.find2(test2));

// How do I add these to the cache manager, and specify a name?
Yes, it is possible. Since you need to fine-tune every cache, you are probably better off defining them yourself. Back to your example, the next step would be:
SimpleCacheManager cacheManager = new SimpleCacheManager();
cacheManager.setCaches(Arrays.asList(
        new CaffeineCache("first", loadingCache1),
        new CaffeineCache("second", loadingCache2)));
And then you can use that as usual, e.g.
#Cacheable("first")
public Foo load(String id) { ... }
If you are using Spring Boot, you can just expose the individual caches as beans (so, org.springframework.cache.Cache implementations) and we'll detect them and create a SimpleCacheManager automatically for you.
Note that this strategy allows you to use the cache abstraction with different implementations. first could be a caffeine cache and second a cache from another provider.
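For instance, a minimal sketch of that mixed setup; ConcurrentMapCache (from org.springframework.cache.concurrent) stands in for "another provider" here, and any org.springframework.cache.Cache implementation would work:
SimpleCacheManager cacheManager = new SimpleCacheManager();
cacheManager.setCaches(Arrays.asList(
        new CaffeineCache("first", loadingCache1), // Caffeine-backed
        new ConcurrentMapCache("second")));        // plain ConcurrentHashMap-backed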
Having this class will allow you to use @Cacheable("cacheA") wherever you want, as normal:
@EnableCaching
@Configuration
public class CacheConfiguration {

    @Bean
    public CacheManager cacheManager() {
        CaffeineCacheManager manager = new CaffeineCacheManager();
        manager.registerCustomCache("cacheA", defaultCache());
        manager.registerCustomCache("cacheB", bigCache());
        manager.registerCustomCache("cacheC", longCache());
        // to avoid dynamic caches and be sure each name is assigned to a specific config (dynamic = false);
        // throws an error when a new cache name is requested
        manager.setCacheNames(Collections.emptyList());
        return manager;
    }

    private static Cache<Object, Object> defaultCache() {
        return Caffeine.newBuilder()
                .maximumSize(1000)
                .expireAfterWrite(5, TimeUnit.MINUTES)
                .build();
    }

    private static Cache<Object, Object> bigCache() {
        return Caffeine.newBuilder()
                .maximumSize(5000)
                .expireAfterWrite(5, TimeUnit.MINUTES)
                .build();
    }

    private static Cache<Object, Object> longCache() {
        return Caffeine.newBuilder()
                .maximumSize(1000)
                .expireAfterWrite(1, TimeUnit.HOURS)
                .build();
    }
}
Thanks to @rado; this is an improved version of his answer. This way we can configure the caches from application properties directly:
cache:
  specs:
    big-cache:
      expire-after: WRITE
      timeout: 2h
      max-size: 1000
    long-cache:
      expire-after: ACCESS
      timeout: 30d
      max-size: 100
We need a cache properties class for this:
@Data
@EnableConfigurationProperties
@Configuration
@ConfigurationProperties(prefix = "cache")
public class CacheProperties {

    private static final int DEFAULT_CACHE_SIZE = 100;

    private Map<String, CacheSpec> specs = new HashMap<>();

    @Data
    public static class CacheSpec {
        private Duration timeout;
        private Integer maxSize = DEFAULT_CACHE_SIZE;
        private ExpireAfter expireAfter = ExpireAfter.WRITE;
    }

    enum ExpireAfter { WRITE, ACCESS }
}
And then we can configure the caches directly from the external config file:
@EnableCaching
@Configuration
@RequiredArgsConstructor
public class CacheConfiguration {

    private final CacheProperties cacheProperties;

    @Bean
    public CacheManager cacheManager() {
        CaffeineCacheManager manager = new CaffeineCacheManager();
        Map<String, CacheProperties.CacheSpec> specs = cacheProperties.getSpecs();
        specs.keySet().forEach(cacheName -> {
            CacheProperties.CacheSpec spec = specs.get(cacheName);
            manager.registerCustomCache(cacheName, buildCache(spec));
        });
        // to avoid dynamic caches and be sure each name is assigned;
        // throws an error when a new cache name is requested
        manager.setCacheNames(Collections.emptyList());
        return manager;
    }

    private Cache<Object, Object> buildCache(CacheProperties.CacheSpec cacheSpec) {
        if (cacheSpec.getExpireAfter() == CacheProperties.ExpireAfter.ACCESS) {
            return Caffeine.newBuilder()
                    .maximumSize(cacheSpec.getMaxSize())
                    .expireAfterAccess(cacheSpec.getTimeout())
                    .build();
        }
        return Caffeine.newBuilder()
                .maximumSize(cacheSpec.getMaxSize())
                .expireAfterWrite(cacheSpec.getTimeout())
                .build();
    }
}
Now you can use the caches by referring to their names:
#Cacheable(cacheNames = "big-cache", key = "{#key}", unless="#result == null")
public Object findByKeyFromBigCache(String key) {
// create the required object and return
}
#Cacheable(cacheNames = "long-cache", key = "{#key}", unless="#result == null")
public Object findByKeyFromLongCache(String key) {
// create the required object and return
}
