Ignite persistence performance hit and metrics - caching

I am trying out native persistence in Apache Ignite. My setup is currently a local, single-node cluster. I enabled persistence by adding this property to my data region:
<property name="persistenceEnabled" value="true"/>
My full data region configuration is as follows:
<bean class="org.apache.ignite.configuration.DataRegionConfiguration">
<property name="name" value="dr.local.input.trade"/>
<property name="persistenceEnabled" value="true"/>
<property name="metricsEnabled" value="true"/>
<property name="initialSize" value="#{200 * 1024 * 1024}"/>
<property name="maxSize" value="#{500 * 1024 * 1024}"/>
<property name="pageEvictionMode" value="RANDOM_2_LRU"/>
</bean>
Now the entries are being persisted, i.e. if I shut down Ignite and restart it, my data comes back into the cache.
I am seeing a significant performance hit: around 35% higher put-operation latency compared to a non-persistent data region. I have referred to the Ignite persistence tuning page and singled out the properties and values below.
Property                  Value
walMode                   LOG_ONLY
walCompactionLevel        3
walCompactionEnabled      true
writeThrottlingEnabled    true
checkpointBufferSize      512 MB
checkpointFrequency       5 minutes
Is there anything more that I can tune? Is the performance hit I mentioned above typical, or can it be lowered much more?
I also tried viewing persistence-related JMX metrics using JConsole, under org.apache.368239c8.ignitelocal."Persistent Store". All metrics under this group show 0. The data is definitely persisted; I can see it in the Ignite work dir and WAL dir. Am I looking at the wrong metrics? Please help.
Attaching the entire Ignite config below.
<?xml version="1.0" encoding="UTF-8"?>
<!--
Generated by Chef for ignite1.intranet.com
-->
<beans xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:util="http://www.springframework.org/schema/util"
xmlns="http://www.springframework.org/schema/beans"
xsi:schemaLocation="
http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans.xsd
http://www.springframework.org/schema/util
http://www.springframework.org/schema/util/spring-util.xsd">
<bean id="propertyConfigurer" class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer">
<property name="systemPropertiesModeName" value="SYSTEM_PROPERTIES_MODE_FALLBACK"/>
<property name="searchSystemEnvironment" value="true"/>
</bean>
<bean id="ignite.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
<!-- Set to true to enable distributed class loading for examples, default is false. -->
<property name="sslContextFactory">
<bean class="org.apache.ignite.ssl.SslContextFactory">
<property name="keyStoreFilePath" value="/home/sysSvcDevOps/ssl/ignite1.keystore.jks"/>
<property name="keyStorePassword" value="KeyStore443"/>
<property name="keyStoreType" value="jks"/>
<property name="trustStoreFilePath" value="/home/sysSvcDevOps/ssl/cacerts/java.cacerts.jks"/>
<property name="trustStorePassword" value="changeit"/>
<property name="trustStoreType" value="jks"/>
</bean>
</property>
<property name="igniteInstanceName" value=".dev"/>
<property name="consistentId" value="ignite1.dev"/>
<property name="workDirectory" value="/apps/Svc/dev/Ignite/IgniteData/persistentstore/work"/>
<property name="rebalanceThreadPoolSize" value="8"/>
<property name="publicThreadPoolSize" value="32"/>
<property name="systemThreadPoolSize" value="64"/>
<property name="queryThreadPoolSize" value="64"/>
<property name="failureDetectionTimeout" value="30000"/>
<property name="authenticationEnabled" value="true"/>
<property name="metricsUpdateFrequency" value="30000"/>
<property name="peerClassLoadingEnabled" value="false"/>
<property name="clientMode" value="false"/>
<!-- Enable task execution events for examples. -->
<property name="includeEventTypes">
<list>
<util:constant static-field="org.apache.ignite.events.EventType.EVT_CACHE_STARTED"/>
<util:constant static-field="org.apache.ignite.events.EventType.EVT_CACHE_STOPPED"/>
<util:constant static-field="org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_PART_DATA_LOST"/>
<util:constant static-field="org.apache.ignite.events.EventType.EVT_CACHE_NODES_LEFT"/>
</list>
</property>
<property name="dataStorageConfiguration">
<bean class="org.apache.ignite.configuration.DataStorageConfiguration">
<property name="walSegmentSize" value="1073741824"/>
<property name="walSegments" value="20"/>
<property name="maxWalArchiveSize" value="10737418240"/>
<property name="walCompactionEnabled" value="true"/>
<property name="walCompactionLevel" value="4"/>
<property name="checkpointFrequency" value="300000"/>
<property name="checkpointThreads" value="16"/>
<property name="checkpointReadLockTimeout" value="60000"/>
<property name="lockWaitTime" value="45000"/>
<property name="checkpointWriteOrder" value="RANDOM"/>
<property name="pageSize" value="4096"/>
<property name="writeThrottlingEnabled" value="true"/>
<!-- wal storage paths -->
<property name="walPath" value="/apps/Svc/dev/Ignite/IgniteData"/>
<property name="walArchivePath" value="/apps/Svc/dev/Ignite/IgniteDataArchive"/>
<property name="storagePath" value="/apps/Svc/dev/Ignite/IgniteData/archive"/>
<property name="dataRegionConfigurations">
<list>
<bean class="org.apache.ignite.configuration.DataRegionConfiguration">
<property name="name" value="dr.dev.referencedata"/>
<property name="persistenceEnabled" value="true"/>
<property name="initialSize" value="1073741824"/>
<property name="maxSize" value="4294969673"/>
<property name="checkpointPageBufferSize" value="1073741824"/>
</bean>
<bean class="org.apache.ignite.configuration.DataRegionConfiguration">
<property name="name" value="dr.dev.input"/>
<property name="persistenceEnabled" value="true"/>
<property name="metricsEnabled" value="true"/>
<property name="checkpointPageBufferSize" value="#{4 * 1024 * 1024 * 1024}"/>
<property name="initialSize" value="12884901888"/>
<property name="maxSize" value="81604378624"/>
<property name="pageEvictionMode" value="RANDOM_2_LRU"/>
</bean>
<bean class="org.apache.ignite.configuration.DataRegionConfiguration">
<property name="name" value="dr.dev.input.exception"/>
<property name="persistenceEnabled" value="true"/>
<property name="metricsEnabled" value="true"/>
<property name="checkpointPageBufferSize" value="#{4 * 1024 * 1024 * 1024}"/>
<property name="initialSize" value="4294967296"/>
<property name="maxSize" value="21474836480"/>
<property name="pageEvictionMode" value="RANDOM_2_LRU"/>
</bean>
<bean class="org.apache.ignite.configuration.DataRegionConfiguration">
<property name="name" value="dr.dev.output"/>
<property name="initialSize" value="1073741824"/>
<property name="persistenceEnabled" value="true"/>
<property name="metricsEnabled" value="true"/>
<property name="checkpointPageBufferSize" value="#{2 * 1024 * 1024 * 1024}"/>
<property name="maxSize" value="2147483648"/>
</bean>
</list>
</property>
<property name="defaultDataRegionConfiguration">
<bean class="org.apache.ignite.configuration.DataRegionConfiguration">
<property name="name" value="default_region"/>
<property name="persistenceEnabled" value="true"/>
<property name="initialSize" value="268435456"/>
<property name="maxSize" value="268435456"/>
</bean>
</property>
</bean>
</property>
<property name="discoverySpi">
<bean class="org.apache.ignite.spi.discovery.zk.ZookeeperDiscoverySpi">
<property name="zkConnectionString" value="zk1.intranet.com:22001,zk2.intranet.com:22001"/>
<property name="zkRootPath" value="/ignite"/>
<property name="sessionTimeout" value="120000"/>
<property name="joinTimeout" value="10000"/>
</bean>
</property>
<property name="communicationSpi">
<bean class="org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi">
<property name="socketWriteTimeout" value="60000"/>
</bean>
</property>
<property name="cacheConfiguration">
<list>
<bean id="cache-template-bean" abstract="true"
class="org.apache.ignite.configuration.CacheConfiguration">
<property name="name" value="referenceDataCacheTemplate*"/>
<property name="cacheMode" value="REPLICATED"/>
<property name="backups" value="1"/>
<property name="atomicityMode" value="ATOMIC"/>
<property name="dataRegionName" value="dr.dev.referencedata"/>
<property name="partitionLossPolicy" value="READ_WRITE_SAFE"/>
<property name="writeSynchronizationMode" value="PRIMARY_SYNC"/>
<property name="statisticsEnabled" value="true"/>
<property name="sqlIndexMaxInlineSize" value="203"/>
<property name="affinity">
<bean class="org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction">
<property name="partitions" value="256"/>
</bean>
</property>
</bean>
<bean id="cache-template-bean" abstract="true"
class="org.apache.ignite.configuration.CacheConfiguration">
<property name="name" value="inputMetadataCacheTemplate*"/>
<property name="cacheMode" value="PARTITIONED"/>
<property name="backups" value="1"/>
<property name="atomicityMode" value="ATOMIC"/>
<property name="dataRegionName" value="dr.dev.input"/>
<property name="partitionLossPolicy" value="READ_WRITE_SAFE"/>
<property name="writeSynchronizationMode" value="PRIMARY_SYNC"/>
<property name="statisticsEnabled" value="true"/>
<property name="readFromBackup" value="false"/>
<property name="sqlIndexMaxInlineSize" value="211"/>
<property name="affinity">
<bean class="org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction">
<property name="partitions" value="256"/>
<property name="affinityBackupFilter">
<bean class="org.apache.ignite.cache.affinity.rendezvous.ClusterNodeAttributeAffinityBackupFilter">
<constructor-arg>
<array value-type="java.lang.String">
<value>RACK_ID</value>
</array>
</constructor-arg>
</bean>
</property>
</bean>
</property>
<property name="expiryPolicyFactory">
<bean class="javax.cache.expiry.ModifiedExpiryPolicy" factory-method="factoryOf">
<constructor-arg>
<bean class="javax.cache.expiry.Duration">
<constructor-arg value="DAYS"/>
<constructor-arg value="5"/>
</bean>
</constructor-arg>
</bean>
</property>
</bean>
<bean id="cache-template-bean" abstract="true"
class="org.apache.ignite.configuration.CacheConfiguration">
<property name="name" value="inputReconCacheTemplate*"/>
<property name="cacheMode" value="PARTITIONED"/>
<property name="backups" value="1"/>
<property name="atomicityMode" value="ATOMIC"/>
<property name="dataRegionName" value="dr.dev.input"/>
<property name="partitionLossPolicy" value="READ_WRITE_SAFE"/>
<property name="writeSynchronizationMode" value="PRIMARY_SYNC"/>
<property name="statisticsEnabled" value="true"/>
<property name="readFromBackup" value="false"/>
<property name="sqlIndexMaxInlineSize" value="211"/>
<property name="affinity">
<bean class="org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction">
<property name="partitions" value="256"/>
<property name="affinityBackupFilter">
<bean class="org.apache.ignite.cache.affinity.rendezvous.ClusterNodeAttributeAffinityBackupFilter">
<constructor-arg>
<array value-type="java.lang.String">
<value>RACK_ID</value>
</array>
</constructor-arg>
</bean>
</property>
</bean>
</property>
<property name="expiryPolicyFactory">
<bean class="javax.cache.expiry.CreatedExpiryPolicy" factory-method="factoryOf">
<constructor-arg>
<bean class="javax.cache.expiry.Duration">
<constructor-arg value="DAYS"/>
<constructor-arg value="4"/>
</bean>
</constructor-arg>
</bean>
</property>
</bean>
<bean id="cache-template-bean" abstract="true"
class="org.apache.ignite.configuration.CacheConfiguration">
<property name="name" value="inputExceptionsCacheTemplate*"/>
<property name="cacheMode" value="PARTITIONED"/>
<property name="backups" value="1"/>
<property name="atomicityMode" value="ATOMIC"/>
<property name="dataRegionName" value="dr.dev.input.exception"/>
<property name="partitionLossPolicy" value="READ_WRITE_SAFE"/>
<property name="writeSynchronizationMode" value="PRIMARY_SYNC"/>
<property name="statisticsEnabled" value="true"/>
<property name="readFromBackup" value="false"/>
<property name="affinity">
<bean class="org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction">
<property name="partitions" value="256"/>
<property name="affinityBackupFilter">
<bean class="org.apache.ignite.cache.affinity.rendezvous.ClusterNodeAttributeAffinityBackupFilter">
<constructor-arg>
<array value-type="java.lang.String">
<value>RACK_ID</value>
</array>
</constructor-arg>
</bean>
</property>
</bean>
</property>
<property name="expiryPolicyFactory">
<bean class="javax.cache.expiry.CreatedExpiryPolicy" factory-method="factoryOf">
<constructor-arg>
<bean class="javax.cache.expiry.Duration">
<constructor-arg value="DAYS"/>
<constructor-arg value="15"/>
</bean>
</constructor-arg>
</bean>
</property>
</bean>
<bean id="cache-template-bean" abstract="true"
class="org.apache.ignite.configuration.CacheConfiguration">
<property name="name" value="outputDataCacheTemplate*"/>
<property name="cacheMode" value="PARTITIONED"/>
<property name="backups" value="1"/>
<property name="atomicityMode" value="ATOMIC"/>
<property name="dataRegionName" value="dr.dev.output"/>
<property name="partitionLossPolicy" value="READ_WRITE_SAFE"/>
<property name="writeSynchronizationMode" value="PRIMARY_SYNC"/>
<property name="sqlSchema" value=""/>
<property name="statisticsEnabled" value="true"/>
<property name="affinity">
<bean class="org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction">
<property name="partitions" value="256"/>
<property name="affinityBackupFilter">
<bean class="org.apache.ignite.cache.affinity.rendezvous.ClusterNodeAttributeAffinityBackupFilter">
<constructor-arg>
<array value-type="java.lang.String">
<value>RACK_ID</value>
</array>
</constructor-arg>
</bean>
</property>
</bean>
</property>
<property name="expiryPolicyFactory">
<bean class="javax.cache.expiry.CreatedExpiryPolicy" factory-method="factoryOf">
<constructor-arg>
<bean class="javax.cache.expiry.Duration">
<constructor-arg value="DAYS"/>
<constructor-arg value="450"/>
</bean>
</constructor-arg>
</bean>
</property>
</bean>
<bean id="cache-template-bean" abstract="true"
class="org.apache.ignite.configuration.CacheConfiguration">
<property name="name" value="reconAuditDataCacheTemplate*"/>
<property name="cacheMode" value="PARTITIONED"/>
<property name="backups" value="1"/>
<property name="atomicityMode" value="ATOMIC"/>
<property name="dataRegionName" value="dr.dev.referencedata"/>
<property name="partitionLossPolicy" value="READ_WRITE_SAFE"/>
<property name="writeSynchronizationMode" value="PRIMARY_SYNC"/>
<property name="sqlSchema" value=""/>
<property name="statisticsEnabled" value="true"/>
<property name="affinity">
<bean class="org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction">
<property name="partitions" value="256"/>
<property name="affinityBackupFilter">
<bean class="org.apache.ignite.cache.affinity.rendezvous.ClusterNodeAttributeAffinityBackupFilter">
<constructor-arg>
<array value-type="java.lang.String">
<value>RACK_ID</value>
</array>
</constructor-arg>
</bean>
</property>
</bean>
</property>
</bean>
<bean id="cache-template-bean" abstract="true"
class="org.apache.ignite.configuration.CacheConfiguration">
<property name="name" value="fileDataCacheTemplate*"/>
<property name="cacheMode" value="PARTITIONED"/>
<property name="backups" value="1"/>
<property name="atomicityMode" value="ATOMIC"/>
<property name="dataRegionName" value="dr.dev.input"/>
<property name="partitionLossPolicy" value="READ_WRITE_SAFE"/>
<property name="writeSynchronizationMode" value="PRIMARY_SYNC"/>
<property name="statisticsEnabled" value="true"/>
<property name="queryParallelism" value="4"/>
<property name="eagerTtl" value="true"/>
<property name="affinity">
<bean class="org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction">
<property name="partitions" value="256"/>
<property name="affinityBackupFilter">
<bean class="org.apache.ignite.cache.affinity.rendezvous.ClusterNodeAttributeAffinityBackupFilter">
<constructor-arg>
<array value-type="java.lang.String">
<value>RACK_ID</value>
</array>
</constructor-arg>
</bean>
</property>
</bean>
</property>
<property name="expiryPolicyFactory">
<bean class="javax.cache.expiry.CreatedExpiryPolicy" factory-method="factoryOf">
<constructor-arg>
<bean class="javax.cache.expiry.Duration">
<constructor-arg value="DAYS"/>
<constructor-arg value="5"/>
</bean>
</constructor-arg>
</bean>
</property>
</bean>
<bean id="cache-template-bean" abstract="true"
class="org.apache.ignite.configuration.CacheConfiguration">
<property name="name" value="shortLivedReferenceDataTemplate*"/>
<property name="cacheMode" value="PARTITIONED"/>
<property name="backups" value="1"/>
<property name="atomicityMode" value="ATOMIC"/>
<property name="dataRegionName" value="dr.dev.input.exception"/>
<property name="partitionLossPolicy" value="READ_WRITE_SAFE"/>
<property name="writeSynchronizationMode" value="PRIMARY_SYNC"/>
<property name="statisticsEnabled" value="true"/>
<property name="managementEnabled" value="true"/>
<property name="affinity">
<bean class="org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction">
<property name="partitions" value="64"/>
<property name="affinityBackupFilter">
<bean class="org.apache.ignite.cache.affinity.rendezvous.ClusterNodeAttributeAffinityBackupFilter">
<constructor-arg>
<array value-type="java.lang.String">
<value>RACK_ID</value>
</array>
</constructor-arg>
</bean>
</property>
</bean>
</property>
<property name="expiryPolicyFactory">
<bean class="javax.cache.expiry.CreatedExpiryPolicy" factory-method="factoryOf">
<constructor-arg>
<bean class="javax.cache.expiry.Duration">
<constructor-arg value="DAYS"/>
<constructor-arg value="2"/>
</bean>
</constructor-arg>
</bean>
</property>
</bean>
</list>
</property>
<property name="sqlSchemas">
<list>
<value>dataInput</value>
</list>
</property>
</bean>
</beans>

Speaking of the possible performance drop on writes.
In comparison to pure in-memory mode, the following disk interactions happen on updates:
In addition to a page modification in RAM, Ignite needs to provide consistency guarantees depending on your WAL mode: unless the WAL is disabled, every update must be written to a WAL file. No data is flushed to disk yet; the modification happens only in memory, plus a WAL record is written.
Once there are too many dirty pages in RAM, or the checkpoint timeout occurs, Ignite starts a checkpointing process that flushes dirty pages to the partition files on disk.
If the WAL becomes too big, Ignite might rotate segments by copying them to the WAL archive to free up space for new WAL updates.
As you can see, there are at least three major disk-related operations, meaning it's crucial to have really fast disks for the /wal, /walarchive and /db mounted folders. Again, it all depends on your use case, but in general it's strongly recommended to put WAL-related activity on the fastest available disks.
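For reference, here is a minimal programmatic sketch of the same WAL/checkpoint knobs discussed above, assuming Ignite 2.x. The values mirror the question's XML and are illustrative, not recommendations; the mount points are hypothetical placeholders.

import org.apache.ignite.configuration.DataStorageConfiguration;
import org.apache.ignite.configuration.WALMode;

public class StorageTuning {
    // Builds a storage configuration with the WAL/checkpoint settings
    // from the question, expressed through the Java API.
    static DataStorageConfiguration storageConfig() {
        return new DataStorageConfiguration()
            .setWalMode(WALMode.LOG_ONLY)            // default mode; survives process crash (not power loss)
            .setWalSegmentSize(1024 * 1024 * 1024)   // 1 GB segments, as in the question
            .setWalCompactionEnabled(true)
            .setCheckpointFrequency(5 * 60 * 1000)   // 5 minutes
            .setWriteThrottlingEnabled(true)         // smooth out checkpoint write spikes
            // Keep WAL-related activity on the fastest disks available
            // (hypothetical mount points):
            .setWalPath("/fast-disk/wal")
            .setWalArchivePath("/fast-disk/wal-archive")
            .setStoragePath("/capacity-disk/db");
    }
}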
Possible performance drop on reads.
Again, it depends on the scenario, but if all your data fits in memory (as it did before you turned persistence on), you will not see any performance difference.
It should be noted that after a restart there will be no data in RAM, and Ignite must preload it first, i.e. do a warm-up.
But if you have more data than your configured data region size, page replacement will take place, rotating data to and from disk. Worst-case scenario: say you have a 10 GB data region and an 11 GB dataset, and you want to scan your data twice in alphabetical order.
There is no data in RAM yet; imagine that you just restarted. Ignite starts reading data from disk and populating data pages in memory. Imagine that after the letter W the in-memory data set became full, and page rotation is required to load the remaining W-Z data. In that case the oldest pages need to be evicted, meaning that, say, the A-D chunk goes to disk so that W-Z can be loaded instead. Your in-memory data set is now something like W-Z, E-V. If you run the same scan query again, the whole data set will be replaced the same way.
Enable persistence metrics.
Check that you have the following property in your data region configuration (more details here):
<property name="metricsEnabled" value="true"/>
Also, there is no need for
<property name="pageEvictionMode" value="RANDOM_2_LRU"/>
It applies only to non-persistent regions.
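On the zeroed "Persistent Store" metrics: the region-level metricsEnabled flag feeds the per-region metrics, and, as far as I recall, the "Persistent Store" group is populated only when metricsEnabled is also set on the DataStorageConfiguration itself (worth verifying on your version). A minimal sketch, assuming Ignite 2.x and the region name from the question:

import org.apache.ignite.DataRegionMetrics;
import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.DataRegionConfiguration;
import org.apache.ignite.configuration.DataStorageConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;

public class PersistenceMetricsCheck {
    public static void main(String[] args) {
        DataRegionConfiguration region = new DataRegionConfiguration()
            .setName("dr.local.input.trade")
            .setPersistenceEnabled(true)
            .setMetricsEnabled(true);           // per-region metrics (DataRegionMetrics)

        DataStorageConfiguration storage = new DataStorageConfiguration()
            .setMetricsEnabled(true)            // storage-level metrics ("Persistent Store")
            .setDataRegionConfigurations(region);

        IgniteConfiguration cfg = new IgniteConfiguration()
            .setDataStorageConfiguration(storage);

        try (Ignite ignite = Ignition.start(cfg)) {
            ignite.cluster().active(true);      // clusters with persistence start inactive

            for (DataRegionMetrics m : ignite.dataRegionMetrics())
                System.out.printf("%s: allocated pages = %d%n",
                    m.getName(), m.getTotalAllocatedPages());

            System.out.println("Last checkpoint duration, ms: "
                + ignite.dataStorageMetrics().getLastCheckpointDuration());
        }
    }
}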

Related

Dynamically create cache in Ignite server node and link it to PostgreSql table

I have one Ignite server node and one thick java client.
I'm able to create new caches on the server node by calling the client node (I have exposed REST APIs).
I have a PostgreSQL DB with schema-separated multitenancy, meaning: Table1 in Schema1 and Schema2 has the same structure; since the copies belong to different schemas, they hold values for different tenants. In PostgreSQL, new schemas and tables can be created dynamically when a new tenant joins the project.
I was able to create a configuration where Table1 from all the existing schemas is loaded into the Ignite server node and values are loaded into the tables from the DB.
Through the client I am able to get values as well.
Problem:
I'm not able to create a cache (for the newly created table in the new schema) from the Client node and link it to PostgreSQL.
I couldn't find a straightforward solution to my issue in the Ignite developer documentation.
Can anyone tell me the proper way to tackle this, and link to an example where a cache is created dynamically and linked to a DB?
I get the following exception. I know I haven't attached code; if you need it to understand the problem better, I will push the code to GitHub and link it here in the question.
Dynamic cache creation: https://www.gridgain.com/docs/latest/developers-guide/key-value-api/basic-cache-operations
Here, using ccfg.setCacheStoreFactory(cacheStoreFactory), I have linked the DB.
NOTE: If I run the client in server mode (cfg.setClientMode(false)), then I'm able to successfully create a new cache and link it to the DB.
Does that mean new caches can be created only on servers?
2021-07-23 00:03:39.574 ERROR 8036 --- [nio-8081-exec-1] o.a.c.c.C.[.[.[/].[dispatcherServlet] : Servlet.service() for servlet [dispatcherServlet] in context with path [] threw exception [Request processing failed; nested exception is javax.cache.CacheException: class org.apache.ignite.IgniteCheckedException: Failed to complete exchange process.] with root cause
org.apache.ignite.IgniteCheckedException: Failed to complete exchange process.
at org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture.createExchangeException(GridDhtPartitionsExchangeFuture.java:3372) ~[ignite-core-8.7.9.jar:8.7.9]
at org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture.sendExchangeFailureMessage(GridDhtPartitionsExchangeFuture.java:3400) ~[ignite-core-8.7.9.jar:8.7.9]
at org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture.finishExchangeOnCoordinator(GridDhtPartitionsExchangeFuture.java:3496) ~[ignite-core-8.7.9.jar:8.7.9]
at org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture.onAllReceived(GridDhtPartitionsExchangeFuture.java:3477) ~[ignite-core-8.7.9.jar:8.7.9]
at org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture.distributedExchange(GridDhtPartitionsExchangeFuture.java:1608) ~[ignite-core-8.7.9.jar:8.7.9]
at org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture.init(GridDhtPartitionsExchangeFuture.java:929) ~[ignite-core-8.7.9.jar:8.7.9]
at org.apache.ignite.internal.processors.cache.GridCachePartitionExchangeManager$ExchangeWorker.body0(GridCachePartitionExchangeManager.java:3251) ~[ignite-core-8.7.9.jar:8.7.9]
at org.apache.ignite.internal.processors.cache.GridCachePartitionExchangeManager$ExchangeWorker.body(GridCachePartitionExchangeManager.java:3097) ~[ignite-core-8.7.9.jar:8.7.9]
at org.apache.ignite.internal.util.worker.GridWorker.run(GridWorker.java:119) ~[ignite-core-8.7.9.jar:8.7.9]
at java.lang.Thread.run(Thread.java:748) ~[na:na]
Suppressed: org.apache.ignite.IgniteCheckedException: Failed to initialize exchange locally [locNodeId=2753929a-755e-4243-b8d0-693e42b1a078]
at org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture.onCacheChangeRequest(GridDhtPartitionsExchangeFuture.java:1345) ~[ignite-core-8.7.9.jar:8.7.9]
at org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture.init(GridDhtPartitionsExchangeFuture.java:855) ~[ignite-core-8.7.9.jar:8.7.9]
... 4 common frames omitted
Caused by: org.apache.ignite.IgniteException: Failed to enrich cache configuration [cacheName=Users_cuddle_nand]
at org.apache.ignite.internal.processors.cache.CacheConfigurationEnricher.enrich(CacheConfigurationEnricher.java:128)
at org.apache.ignite.internal.processors.cache.CacheConfigurationEnricher.enrich(CacheConfigurationEnricher.java:61)
at org.apache.ignite.internal.processors.cache.GridCacheProcessor.prepareCacheContext(GridCacheProcessor.java:1881)
at org.apache.ignite.internal.processors.cache.GridCacheProcessor.prepareCacheStart(GridCacheProcessor.java:1849)
at org.apache.ignite.internal.processors.cache.GridCacheProcessor.lambda$prepareStartCaches$55a0e703$1(GridCacheProcessor.java:1724)
at org.apache.ignite.internal.processors.cache.GridCacheProcessor.lambda$prepareStartCachesIfPossible$14(GridCacheProcessor.java:1694)
at org.apache.ignite.internal.processors.cache.GridCacheProcessor.prepareStartCaches(GridCacheProcessor.java:1721)
at org.apache.ignite.internal.processors.cache.GridCacheProcessor.prepareStartCachesIfPossible(GridCacheProcessor.java:1692)
at org.apache.ignite.internal.processors.cache.CacheAffinitySharedManager.processCacheStartRequests(CacheAffinitySharedManager.java:971)
at org.apache.ignite.internal.processors.cache.CacheAffinitySharedManager.onCacheChangeRequest(CacheAffinitySharedManager.java:857)
at org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture.onCacheChangeRequest(GridDhtPartitionsExchangeFuture.java:1334)
... 5 common frames omitted
Caused by: org.apache.ignite.IgniteException: Failed to deserialize field storeFactory
at org.apache.ignite.internal.processors.cache.CacheConfigurationEnricher.deserialize(CacheConfigurationEnricher.java:153)
at org.apache.ignite.internal.processors.cache.CacheConfigurationEnricher.enrich(CacheConfigurationEnricher.java:121)
... 15 common frames omitted
Caused by: org.apache.ignite.IgniteCheckedException: Failed to deserialize object with given class loader: sun.misc.Launcher$AppClassLoader#18b4aac2
at org.apache.ignite.marshaller.jdk.JdkMarshaller.unmarshal0(JdkMarshaller.java:148)
at org.apache.ignite.marshaller.AbstractNodeNameAwareMarshaller.unmarshal(AbstractNodeNameAwareMarshaller.java:92)
at org.apache.ignite.marshaller.jdk.JdkMarshaller.unmarshal0(JdkMarshaller.java:162)
at org.apache.ignite.marshaller.AbstractNodeNameAwareMarshaller.unmarshal(AbstractNodeNameAwareMarshaller.java:80)
at org.apache.ignite.internal.util.IgniteUtils.unmarshal(IgniteUtils.java:10478)
at org.apache.ignite.internal.processors.cache.CacheConfigurationEnricher.deserialize(CacheConfigurationEnricher.java:150)
... 16 common frames omitted
Caused by: java.lang.ClassCastException: cannot assign instance of java.lang.invoke.SerializedLambda to field org.apache.ignite.cache.store.jdbc.CacheJdbcPojoStoreFactory.dataSrcFactory of type javax.cache.configuration.Factory in instance of org.apache.ignite.cache.store.jdbc.CacheJdbcPojoStoreFactory
at java.io.ObjectStreamClass$FieldReflector.setObjFieldValues(ObjectStreamClass.java:2301)
at java.io.ObjectStreamClass.setObjFieldValues(ObjectStreamClass.java:1431)
at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2411)
at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2329)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2187)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1667)
at java.io.ObjectInputStream.readObject(ObjectInputStream.java:503)
at java.io.ObjectInputStream.readObject(ObjectInputStream.java:461)
at org.apache.ignite.marshaller.jdk.JdkMarshaller.unmarshal0(JdkMarshaller.java:140)
... 21 common frames omitted
My guess is that you cannot easily create new Postgres-backed caches via Ignite, because all caches must be provided in the config and they must match the base table in Postgres as well.
Some time ago I integrated Ignite with Postgres and was able to use the following configuration:
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:util="http://www.springframework.org/schema/util"
xsi:schemaLocation="
http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.1.xsd
http://www.springframework.org/schema/util http://www.springframework.org/schema/util/spring-util-3.1.xsd">
<bean id="postgre_con" class="org.springframework.jdbc.datasource.DriverManagerDataSource">
<property name="driverClassName" value="org.postgresql.Driver" />
<property name="url" value="jdbc:postgresql://localhost:5432/postgres" />
<property name="username" value="postgres" />
<property name="password" value="qwerty" />
<property name="connectionProperties">
<props>
<prop key="socketTimeout">10</prop>
</props>
</property>
</bean>
<bean id="grid.cfg"
class="org.apache.ignite.configuration.IgniteConfiguration">
<property name="failureDetectionTimeout" value="60000"/>
<property name="clientFailureDetectionTimeout" value="60000"/>
<property name="cacheConfiguration">
<list>
<bean class="org.apache.ignite.configuration.CacheConfiguration">
<property name="name" value="Person"/>
<property name="cacheMode" value="PARTITIONED"/>
<property name="atomicityMode" value="TRANSACTIONAL"/>
<property name="sqlSchema" value="PUBLIC"/>
<property name="keyConfiguration">
<list>
<bean class="org.apache.ignite.cache.CacheKeyConfiguration">
<constructor-arg name="keyCls" value="org.gridgain.essilor.model.Person"/>
</bean>
</list>
</property>
<property name="cacheStoreFactory">
<bean class="org.apache.ignite.cache.store.jdbc.CacheJdbcPojoStoreFactory">
<property name="dataSourceBean" value="postgre_con"/>
<property name="dialect">
<bean class="org.apache.ignite.cache.store.jdbc.dialect.BasicJdbcDialect"/>
</property>
<property name="types">
<list>
<bean class="org.apache.ignite.cache.store.jdbc.JdbcType">
<property name="cacheName" value="Person"/>
<property name="keyType" value="java.lang.Integer"/>
<property name="valueType" value="org.gridgain.essilor.model.Person"/>
<property name="databaseSchema" value="PUBLIC"/>
<property name="databaseTable" value="Person"/>
<property name="keyFields">
<list>
<bean class="org.apache.ignite.cache.store.jdbc.JdbcTypeField">
<constructor-arg>
<util:constant static-field="java.sql.Types.INTEGER"/>
</constructor-arg>
<constructor-arg value="person_id"/>
<constructor-arg value="int"/>
<constructor-arg value="person_id"/>
</bean>
</list>
</property>
<property name="valueFields">
<list>
<bean class="org.apache.ignite.cache.store.jdbc.JdbcTypeField">
<constructor-arg>
<util:constant static-field="java.sql.Types.INTEGER"/>
</constructor-arg>
<constructor-arg value="company_id"/>
<constructor-arg value="int"/>
<constructor-arg value="company_id"/>
</bean>
<bean class="org.apache.ignite.cache.store.jdbc.JdbcTypeField">
<constructor-arg>
<util:constant static-field="java.sql.Types.VARCHAR"/>
</constructor-arg>
<constructor-arg value="person_name"/>
<constructor-arg value="java.lang.String"/>
<constructor-arg value="person_name"/>
</bean>
</list>
</property>
</bean>
</list>
</property>
</bean>
</property>
<property name="readThrough" value="true"/>
<property name="writeThrough" value="true"/>
<!-- Configure type metadata to enable queries. -->
<property name="queryEntities">
<list>
<bean class="org.apache.ignite.cache.QueryEntity">
<property name="keyType" value="java.lang.Integer"/>
<property name="valueType" value="org.gridgain.essilor.model.Person"/>
<property name="tableName" value="Person"/>
<property name="keyFieldName" value="person_id"/>
<property name="fields">
<map>
<entry key="person_id" value="java.lang.Integer"/>
<entry key="company_id" value="java.lang.Integer"/>
<entry key="person_name" value="java.lang.String"/>
</map>
</property>
</bean>
</list>
</property>
</bean>
<bean class="org.apache.ignite.configuration.CacheConfiguration">
<property name="name" value="Company"/>
<property name="cacheMode" value="PARTITIONED"/>
<property name="atomicityMode" value="TRANSACTIONAL"/>
<property name="sqlSchema" value="PUBLIC"/>
<property name="keyConfiguration">
<list>
<bean class="org.apache.ignite.cache.CacheKeyConfiguration">
<constructor-arg name="keyCls" value="org.gridgain.essilor.model.Company"/>
</bean>
</list>
</property>
<property name="cacheStoreFactory">
<bean class="org.apache.ignite.cache.store.jdbc.CacheJdbcPojoStoreFactory">
<property name="dataSourceBean" value="postgre_con"/>
<property name="dialect">
<bean class="org.apache.ignite.cache.store.jdbc.dialect.BasicJdbcDialect"/>
</property>
<property name="types">
<list>
<bean class="org.apache.ignite.cache.store.jdbc.JdbcType">
<property name="cacheName" value="Company"/>
<property name="keyType" value="java.lang.Integer"/>
<property name="valueType" value="org.gridgain.essilor.model.Company"/>
<property name="databaseSchema" value="PUBLIC"/>
<property name="databaseTable" value="Company"/>
<property name="keyFields">
<list>
<bean class="org.apache.ignite.cache.store.jdbc.JdbcTypeField">
<constructor-arg>
<util:constant static-field="java.sql.Types.INTEGER"/>
</constructor-arg>
<constructor-arg value="company_id"/>
<constructor-arg value="int"/>
<constructor-arg value="company_id"/>
</bean>
</list>
</property>
<property name="valueFields">
<list>
<bean class="org.apache.ignite.cache.store.jdbc.JdbcTypeField">
<constructor-arg>
<util:constant static-field="java.sql.Types.VARCHAR"/>
</constructor-arg>
<constructor-arg value="company_name"/>
<constructor-arg value="java.lang.String"/>
<constructor-arg value="company_name"/>
</bean>
</list>
</property>
</bean>
</list>
</property>
</bean>
</property>
<property name="readThrough" value="true"/>
<property name="writeThrough" value="true"/>
<property name="queryEntities">
<list>
<bean class="org.apache.ignite.cache.QueryEntity">
<property name="keyType" value="java.lang.Integer"/>
<property name="valueType" value="org.gridgain.essilor.model.Company"/>
<property name="tableName" value="Company"/>
<property name="keyFieldName" value="company_id"/>
<property name="fields">
<map>
<entry key="company_id" value="java.lang.Integer"/>
<entry key="company_name" value="java.lang.String"/>
</map>
</property>
</bean>
</list>
</property>
</bean>
</list>
</property>
<property name="dataStorageConfiguration">
<bean class="org.apache.ignite.configuration.DataStorageConfiguration">
<property name="defaultDataRegionConfiguration">
<bean class="org.apache.ignite.configuration.DataRegionConfiguration">
<property name="persistenceEnabled" value="true"/>
<property name="name" value="Default_Region"/>
<property name="initialSize" value="#{500L * 1024 * 1024}"/>
<property name="maxSize" value="#{1L * 1024 * 1024 * 1024}"/>
</bean>
</property>
<property name="walMode" value="LOG_ONLY"/>
<property name="writeThrottlingEnabled" value="true"/>
<property name="pageSize" value="4096"/>
</bean>
</property>
<property name="discoverySpi">
<bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi">
<property name="ipFinder">
<bean
class="org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder">
<property name="addresses">
<list>
<value>127.0.0.1:47500..47501</value>
</list>
</property>
</bean>
</property>
</bean>
</property>
<property name="communicationSpi">
<bean
class="org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi">
<property name="localPort" value="48100"/>
<property name="localPortRange" value="10"/>
<property name="socketWriteTimeout" value="300000"/>
</bean>
</property>
</bean>
</beans>
My guess is that when you try to change the config from your client, the server side cannot merge it with its own config. The only way that seems to work here is to apply the new configuration to the entire cluster, discarding all cached data (you can just load it again from Postgres after the upgrade).
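For completeness, a hedged sketch of how a thick client can create such a cache dynamically without tripping over the SerializedLambda error in the stack trace above. The store factory is serialized and shipped to the server nodes, so it must reference the data source by bean name rather than capture a lambda; names below are hypothetical, and "postgre_con" is assumed to exist in the server-side Spring config as in the XML above.

import org.apache.ignite.Ignite;
import org.apache.ignite.cache.store.jdbc.CacheJdbcPojoStoreFactory;
import org.apache.ignite.configuration.CacheConfiguration;

public class DynamicCacheCreator {

    // Creates a read/write-through cache for a tenant table at runtime.
    // Do NOT pass a lambda to setDataSourceFactory(): a SerializedLambda
    // cannot be deserialized on the remote nodes, which is exactly the
    // ClassCastException shown in the stack trace.
    public static void createTenantCache(Ignite ignite, String schema, String table) {
        CacheJdbcPojoStoreFactory<Integer, Object> storeFactory =
            new CacheJdbcPojoStoreFactory<>();
        // Reference the data source bean defined on the server nodes by name,
        // which keeps the factory serializable:
        storeFactory.setDataSourceBean("postgre_con");
        // JdbcType mappings (keyFields/valueFields for schema.table) omitted here.

        CacheConfiguration<Integer, Object> ccfg =
            new CacheConfiguration<>(schema + "_" + table);
        ccfg.setCacheStoreFactory(storeFactory);
        ccfg.setReadThrough(true);
        ccfg.setWriteThrough(true);

        ignite.getOrCreateCache(ccfg);
    }
}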

Not able to reference dataSource from another bean configuration in Spring

<bean id="hikariConfig" class="com.zaxxer.hikari.HikariConfig">
<property name="poolName" value="${models.DS_POOL_NAME}" />
</bean>
<bean id="DBPlaceholder" class="org.springframework.beans.factory.config.PropertyPlaceholderConfigurer">
<property name="systemPropertiesModeName" value="SYSTEM_PROPERTIES_MODE_OVERRIDE"/>
<property name="ignoreUnresolvablePlaceholders" value="true"/>
<property name="properties">
<bean class="org.apache.commons.configuration2.ConfigurationConverter" factory-method="getProperties">
<constructor-arg>
<bean id="DatabaseConfigurator" class="org.apache.commons.configuration2.DatabaseConfiguration">
<property name="dataSource" ref="dataSource" />
<property name="table" value="sample" />
<property name="keyColumn" value="PROPERTY" />
<property name="valueColumn" value="VALUE" />
<property name="configurationNameColumn" value="GROUP_NAME" />
<property name="configurationName" value="new" />
</bean>
</constructor-arg>
</bean>
</property>
</bean>
When we reference dataSource in the DBPlaceholder bean, ${models.DS_POOL_NAME} shows an error, because this value comes from the properties loaded by that bean.

Error at Server Startup while adding ActiveMQ by replacing HornetQ

I am using Spring 4, Hibernate, and ActiveMQ with REST APIs.
ActiveMQ configuration:
<bean id="jmsTransactionManager" class="org.springframework.jms.connection.JmsTransactionManager">
<property name="connectionFactory" ref="connectionFactory"></property>
</bean>
<!-- Configuration for Publishing Jms Messages -->
<bean id="activeMqConnectionFactory" class="org.apache.activemq.ActiveMQConnectionFactory">
<property name="brokerURL" value="tcp://localhost:61616"/>
<property name="disableTimeStampsByDefault" value="true"/>
<property name="useAsyncSend" value="true"/>
<property name="nonBlockingRedelivery" value="true"/>
<property name="redeliveryPolicy" ref="redeliveryPolicy"/>
</bean>
<bean id="redeliveryPolicy" class="org.apache.activemq.RedeliveryPolicy">
<property name="initialRedeliveryDelay" value="60000"/>
<property name="backOffMultiplier" value="3"/>
<property name="maximumRedeliveryDelay" value="60000"/>
<property name="maximumRedeliveries" value="3"/>
<property name="redeliveryDelay" value="60000"/>
<property name="useExponentialBackOff" value="true"/>
</bean>
<!--<bean id="outgoingSmsQueue" class="org.apache.activemq.command.ActiveMQQueue">
<property name="physicalName" value="queue/outgoingSmsQueue"/>
</bean>
<bean id="emailServiceQueue" class="org.apache.activemq.command.ActiveMQQueue">
<property name="physicalName" value="queue/emailServiceQueue"/>
</bean>-->
<bean id="activemqOrderQueue" class="org.apache.activemq.command.ActiveMQQueue">
<property name="physicalName" value="ActiveMQ.orderQueue"/>
</bean>
<bean id="activemqDLQ" class="org.apache.activemq.command.ActiveMQQueue">
<property name="physicalName" value="ActiveMQ.DLQ"/>
</bean>
<bean id="connectionFactory" class="org.springframework.jms.connection.CachingConnectionFactory">
<property name="targetConnectionFactory" ref="activeMqConnectionFactory"/>
<property name="sessionCacheSize" value="200"/>
<property name="cacheProducers" value="true"/>
<property name="cacheConsumers" value="true"/>
</bean>
<bean id="jmsTemplate" class="org.springframework.jms.core.JmsTemplate">
<property name="connectionFactory" ref="connectionFactory"/>
<property name="sessionTransacted" value="true"/>
<property name="deliveryPersistent" value="true"/>
<property name="explicitQosEnabled" value="true"/>
<property name="messageIdEnabled" value="false"/>
<property name="messageTimestampEnabled" value="false"/>
</bean>
<bean id="jmsProducer" class="com.mahopos.jms.JmsProducerImpl">
<property name="jmsTemplate" ref="jmsTemplate"/>
</bean>
<!-- End Configuration for Publishing Jms Messages -->
<bean id="dlqMessageListener" class="com.mahopos.jms.DlqMessageListener" autowire="byName">
</bean>
<bean id="dlqMessageListenerContainer" class="org.springframework.jms.listener.DefaultMessageListenerContainer"
parent="baseMessageListenerContainer">
<property name="destination" ref="activemqDLQ"/>
<property name="messageListener" ref="dlqMessageListener"/>
</bean>
<bean id="baseMessageListenerContainer" class="org.springframework.jms.listener.DefaultMessageListenerContainer"
abstract="true">
<property name="connectionFactory" ref="connectionFactory"/>
<property name="transactionManager" ref="jmsTransactionManager"/>
<property name="concurrentConsumers" value="10"/>
<property name="maxConcurrentConsumers" value="10"/>
<property name="idleConsumerLimit" value="1"/>
<property name="receiveTimeout" value="5000"/>
<property name="idleTaskExecutionLimit" value="50"/>
<property name="cacheLevel">
<util:constant
static-field="org.springframework.jms.listener.DefaultMessageListenerContainer.CACHE_CONSUMER"/>
</property>
</bean>
<bean id="orderQueueListener" class="com.mahopos.jms.OrderQueueListener">
<property name="demoApiService" ref="demoApiService"/>
<property name="delay" value="1500"/>
</bean>
<bean id="orderQueueListenerContainer" class="org.springframework.jms.listener.DefaultMessageListenerContainer"
parent="baseMessageListenerContainer">
<property name="concurrentConsumers" value="5"/>
<property name="destination" ref="activemqOrderQueue"/>
<property name="messageListener" ref="orderQueueListener"/>
<property name="sessionTransacted" value="true"/>
</bean>
Apparently ContextLoaderListener is initialized more than once, but where?
The server starts successfully, but the attached image is shown in Chrome, and no REST call works after server startup.
@Justin Bertram, HornetQ was working previously and I replaced it with ActiveMQ; the application is REST-based.
The issue was resolved by replacing the dependency
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-aop</artifactId>
<version>3.2.5.RELEASE</version>
</dependency>
to
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-aop</artifactId>
<version>4.3.22.RELEASE</version>
</dependency>
It seems the issue was due to conflicting library versions.

org.apache.ignite.IgniteCheckedException: Cannot enable write-through

Here is the configuration for the cache. I want writeThrough to be enabled. Why did I get the exception below? What does "writer or store is not provided" mean?
Configuration:
<property name="cacheConfiguration">
<bean class="org.apache.ignite.configuration.CacheConfiguration">
<property name="name" value="txnCache"/>
<property name="cacheMode" value="PARTITIONED"/>
<property name="writeSynchronizationMode" value="FULL_SYNC"/>
<property name="writeThrough" value="true"/>
<property name="backups" value="1"/>
<!--property name="cacheMode" value="REPLICATED"/-->
<!-- <property name="atomicityMode" value="ATOMIC"/>
<property name="readFromBackup" value="true"/>
<property name="copyOnRead" value="true"/>-->
</bean>
</property>
Error:
[13:24:07,176][SEVERE][main][IgniteKernal] Got exception while starting (will rollback startup routine).
class org.apache.ignite.IgniteCheckedException: Cannot enable write-through (writer or store is not provided) for cache: txnCache
at org.apache.ignite.internal.processors.cache.GridCacheProcessor.validate(GridCacheProcessor.java:482)
at org.apache.ignite.internal.processors.cache.GridCacheProcessor.createCache(GridCacheProcessor.java:1462)
at org.apache.ignite.internal.processors.cache.GridCacheProcessor.onKernalStart(GridCacheProcessor.java:885)
at org.apache.ignite.internal.IgniteKernal.start(IgniteKernal.java:1013)
at org.apache.ignite.internal.IgnitionEx$IgniteNamedInstance.start0(IgnitionEx.java:1895)
at org.apache.ignite.internal.IgnitionEx$IgniteNamedInstance.start(IgnitionEx.java:1647)
at org.apache.ignite.internal.IgnitionEx.start0(IgnitionEx.java:1075)
at org.apache.ignite.internal.IgnitionEx.start(IgnitionEx.java:573)
at org.apache.ignite.internal.processors.platform.PlatformAbstractBootstrap.start(PlatformAbstractBootstrap.java:48)
at org.apache.ignite.internal.processors.platform.PlatformIgnition.start(PlatformIgnition.java:76)
[13:24:07] Cancelled rebalancing from all nodes [topology=null]
[13:24:07] Cancelled rebalancing from all nodes [topology=null]
To configure write-through, you need to implement the CacheStore interface (or use one of the existing implementations) and set the cacheStoreFactory as well as the writeThrough property of CacheConfiguration. It will look like:
<bean id= "simpleDataSource" class="org.h2.jdbcx.JdbcDataSource"/>
<bean id="ignite.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
...
<property name="cacheConfiguration">
<list>
<bean class="org.apache.ignite.configuration.CacheConfiguration">
...
<property name="writeThrough" value="true"/>
<property name="cacheStoreFactory">
<bean class="org.apache.ignite.cache.store.jdbc.CacheJdbcPojoStoreFactory">
<property name="dataSourceBean" value = "simpleDataSource" />
</bean>
</property>
</bean>
</list>
</property>
</bean>
Here is more information about cacheStore and writeThrough:
https://apacheignite.readme.io/v2.0/docs/persistent-store#section-read-through-and-write-through
what does "writer or store is not provided" mean?
It means that you didn't provide a store in the configuration.
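For illustration, a minimal sketch of a custom store, assuming a plain JDBC table PERSONS(id, name) and an H2 in-memory URL (both hypothetical); a real deployment would use a pooled DataSource.

import java.sql.*;
import javax.cache.Cache;
import javax.cache.integration.CacheLoaderException;
import javax.cache.integration.CacheWriterException;
import org.apache.ignite.cache.store.CacheStoreAdapter;

public class TxnCacheStore extends CacheStoreAdapter<Long, String> {
    private Connection conn() throws SQLException {
        return DriverManager.getConnection("jdbc:h2:mem:txn"); // hypothetical URL
    }

    @Override public String load(Long key) {
        try (Connection c = conn();
             PreparedStatement ps = c.prepareStatement("SELECT name FROM PERSONS WHERE id = ?")) {
            ps.setLong(1, key);
            try (ResultSet rs = ps.executeQuery()) {
                return rs.next() ? rs.getString(1) : null; // null means "not in store"
            }
        } catch (SQLException e) {
            throw new CacheLoaderException(e);
        }
    }

    @Override public void write(Cache.Entry<? extends Long, ? extends String> e) {
        try (Connection c = conn();
             PreparedStatement ps = c.prepareStatement("MERGE INTO PERSONS (id, name) VALUES (?, ?)")) {
            ps.setLong(1, e.getKey());
            ps.setString(2, e.getValue());
            ps.executeUpdate();
        } catch (SQLException ex) {
            throw new CacheWriterException(ex);
        }
    }

    @Override public void delete(Object key) {
        try (Connection c = conn();
             PreparedStatement ps = c.prepareStatement("DELETE FROM PERSONS WHERE id = ?")) {
            ps.setLong(1, (Long) key);
            ps.executeUpdate();
        } catch (SQLException ex) {
            throw new CacheWriterException(ex);
        }
    }
}

You would then register it via ccfg.setCacheStoreFactory(FactoryBuilder.factoryOf(TxnCacheStore.class)) and keep writeThrough set to true, which satisfies the validation that produced the exception.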

How to custom Spring Batch DelimitedLineTokenizer

I have two file types to insert into the database.
The formats are: aa;bb;cc and aa;bb;cc;dd;ee
This is my FlatFileItemReader :
<bean name="readerContractToAddIntoPRV" class="org.springframework.batch.item.file.FlatFileItemReader">
<property name="comments" value="#" />
<property name="linesToSkip" value="1" />
<property name="strict" value="false" />
<property name="lineMapper">
<bean class="org.springframework.batch.item.file.mapping.DefaultLineMapper">
<property name="fieldSetMapper">
<bean class="net.wl.batchs.fieldSetMapper.LineToCreateIntoPrvFieldSetMapper" />
</property>
<property name="lineTokenizer">
<bean class="org.springframework.batch.item.file.transform.DelimitedLineTokenizer">
<property name="delimiter" value=";"/>
<property name="names" value="aa,bb,cc,dd,ee" />
</bean>
</property>
</bean>
</property>
</bean>
I want a setup that works for both types of files.
For the moment, I get this:
org.springframework.batch.item.file.transform.IncorrectTokenCountException:
Incorrect number of tokens found in record: expected 3 actual 5
Do you have any ideas?
Thank you.
Edit: after the correction:
<bean name="readerContractToAddIntoPRV" class="org.springframework.batch.item.file.FlatFileItemReader">
<property name="comments" value="#" />
<property name="linesToSkip" value="1" />
<property name="strict" value="false" />
<property name="lineMapper">
<bean class="org.springframework.batch.item.file.mapping.DefaultLineMapper" p:lineTokenizer-ref="multilineFileTokenizer">
<property name="fieldSetMapper">
<bean class="net.wl.batchs.fieldSetMapper.LineToCreateIntoPrvFieldSetMapper" />
</property>
</bean>
</property>
</bean>
<bean id="multilineFileTokenizer" class="org.springframework.batch.item.file.transform.PatternMatchingCompositeLineTokenizer">
<property name="tokenizers">
<map>
<entry key="*;*;*;*;*" value-ref="NSCE_ICCID_MSISDN_LOGIN_PWD"/>
<entry key="*;*;*" value-ref="NSCE_ICCID_MSISDN"/>
<entry key="*" value-ref="headerDefault"/>
</map>
</property>
</bean>
<bean id="parentLineTokenizer" class="org.springframework.batch.item.file.transform.DelimitedLineTokenizer" abstract="true">
<property name="delimiter" value=";"/>
</bean>
<bean id="NSCE_ICCID_MSISDN_LOGIN_PWD" parent="parentLineTokenizer">
<property name="names" value="nsce,iccid,msisdn,login,pwd" />
</bean>
<bean id="NSCE_ICCID_MSISDN" parent="parentLineTokenizer">
<property name="names" value="nsce,iccid,msisdn" />
</bean>
<bean id="headerDefault" parent="parentLineTokenizer">
<property name="names" value="nsce,iccid,msisdn" />
</bean>
The issue isn't your tokenizer. What you'll have to do is use the PatternMatchingCompositeLineMapper (http://docs.spring.io/spring-batch/trunk/apidocs/org/springframework/batch/item/file/mapping/PatternMatchingCompositeLineMapper.html). This will allow you to create a pattern for each line type you have and associate it with the appropriate LineTokenizer.
You can see this LineMapper in action in our samples here: https://github.com/spring-projects/spring-batch/blob/master/spring-batch-samples/src/main/resources/jobs/multilineOrderInputTokenizers.xml
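For reference, here is a hedged Java sketch of the composite mapping described above. The generic type parameter and helper class are illustrative; with the question's classes it would be called as create(new LineToCreateIntoPrvFieldSetMapper()).

import java.util.HashMap;
import java.util.Map;
import org.springframework.batch.item.file.mapping.FieldSetMapper;
import org.springframework.batch.item.file.mapping.PatternMatchingCompositeLineMapper;
import org.springframework.batch.item.file.transform.DelimitedLineTokenizer;
import org.springframework.batch.item.file.transform.LineTokenizer;

public class TwoLayoutLineMapperFactory {

    // Builds a LineMapper that picks a tokenizer per line pattern,
    // mirroring the XML from the question's edit.
    public static <T> PatternMatchingCompositeLineMapper<T> create(FieldSetMapper<T> fieldSetMapper) {
        DelimitedLineTokenizer fiveFields = new DelimitedLineTokenizer(";");
        fiveFields.setNames(new String[] {"nsce", "iccid", "msisdn", "login", "pwd"});

        DelimitedLineTokenizer threeFields = new DelimitedLineTokenizer(";");
        threeFields.setNames(new String[] {"nsce", "iccid", "msisdn"});

        Map<String, LineTokenizer> tokenizers = new HashMap<>();
        tokenizers.put("*;*;*;*;*", fiveFields);   // aa;bb;cc;dd;ee layout
        tokenizers.put("*;*;*", threeFields);      // aa;bb;cc layout
        // Add a "*" fallback entry here if header lines must be tolerated,
        // like the headerDefault bean in the XML above.

        Map<String, FieldSetMapper<T>> mappers = new HashMap<>();
        mappers.put("*;*;*;*;*", fieldSetMapper);
        mappers.put("*;*;*", fieldSetMapper);

        PatternMatchingCompositeLineMapper<T> lineMapper =
            new PatternMatchingCompositeLineMapper<>();
        lineMapper.setTokenizers(tokenizers);
        lineMapper.setFieldSetMappers(mappers);
        return lineMapper;
    }
}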
