Spring Batch JDBCPagingItemReader not partitioning equally for each thread

This is my first question here. I am working on a Spring Batch job and I am using step partitioning to process 70K records. For testing I am using 1021 records, and I found that the partitioning is not happening equally for each thread. I am using JdbcPagingItemReader with 5 threads. The distribution should be:
Thread 1 - 205
Thread 2 - 205
Thread 3 - 205
Thread 4 - 205
Thread 5 - 201
But unfortunately this is not happening, and I am getting the record distribution below among the threads:
Thread 1 - 100
Thread 2 - 111
Thread 3 - 100
Thread 4 - 205
Thread 5 - 200
In total 716 records are processed and 305 records are skipped during partitioning. I really don't have any clue what is happening. Could you please look at the configuration below and let me know whether I am missing anything? Thanks in advance for your help.
<import resource="../config/batch-context.xml" />
<import resource="../config/database.xml" />
<job id="partitionJob" xmlns="http://www.springframework.org/schema/batch">
<step id="masterStep" parent="abstractPartitionerStagedStep">
<partition step="slave" partitioner="rangePartitioner">
<handler grid-size="5" task-executor="taskExecutor"/>
</partition>
</step>
</job>
<bean id="abstractPartitionerStagedStep" abstract="true">
<property name="listeners">
<list>
<ref bean="updatelistener" />
</list>
</property>
</bean>
<bean id="updatelistener"
class="com.test.springbatch.model.UpdateFileCopyStatus" >
</bean>
<!-- Jobs to run -->
<step id="slave" xmlns="http://www.springframework.org/schema/batch">
<tasklet>
<chunk reader="pagingItemReader" writer="flatFileItemWriter"
processor="itemProcessor" commit-interval="1" retry-limit="0" skip-limit="100">
<skippable-exception-classes>
<include class="java.lang.Exception"/>
</skippable-exception-classes>
</chunk>
</tasklet>
</step>
<bean id="rangePartitioner" class="com.test.springbatch.partition.RangePartitioner">
<property name="dataSource" ref="dataSource" />
</bean>
<bean id="taskExecutor" class="org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor" >
<property name="corePoolSize" value="5"/>
<property name="maxPoolSize" value="5"/>
<property name="queueCapacity" value="100" />
<property name="allowCoreThreadTimeOut" value="true"/>
<property name="keepAliveSeconds" value="60" />
</bean>
<bean id="itemProcessor" class="com.test.springbatch.processor.CaseProcessor" scope="step">
<property name="threadName" value="#{stepExecutionContext[name]}" />
</bean>
<bean id="pagingItemReader"
class="org.springframework.batch.item.database.JdbcPagingItemReader"
scope="step">
<property name="dataSource" ref="dataSource" />
<property name="queryProvider">
<bean
class="org.springframework.batch.item.database.support.SqlPagingQueryProviderFactoryBean">
<property name="dataSource" ref="dataSource" />
<property name="selectClause" value="SELECT *" />
<property name="fromClause" value="FROM ( SELECT CASE_NUM ,CASE_STTS_CD, UPDT_TS,SBMT_OFC_CD,
SBMT_OFC_NUM,DSTR_CHNL_CD,APRV_OFC_CD,APRV_OFC_NUM,SBMT_TYP_CD, ROW_NUMBER()
OVER(ORDER BY CASE_NUM) AS rownumber FROM TSMCASE WHERE PROC_IND ='N' ) AS data" />
<property name="whereClause" value="WHERE rownumber BETWEEN :fromRow AND :toRow " />
<property name="sortKey" value="CASE_NUM" />
</bean>
</property>
<!-- Inject via the ExecutionContext in rangePartitioner -->
<property name="parameterValues">
<map>
<entry key="fromRow" value="#{stepExecutionContext[fromRow]}" />
<entry key="toRow" value="#{stepExecutionContext[toRow]}" />
</map>
</property>
<property name="pageSize" value="100" />
<property name="rowMapper">
<bean class="com.test.springbatch.model.CaseRowMapper" />
</property>
</bean>
<bean id="flatFileItemWriter" class="com.test.springbatch.writer.FNWriter" scope="step" >
</bean>
Here is the partitioner code:
public class OffRangePartitioner implements Partitioner {
private String officeLst;
private double splitvalue;
private DataSource dataSource;
private static Logger LOGGER = Log4JFactory.getLogger(OffRangePartitioner.class);
private static final int INDENT_LEVEL = 6;
public String getOfficeLst() {
return officeLst;
}
public void setOfficeLst(final String officeLst) {
this.officeLst = officeLst;
}
public void setDataSource(DataSource dataSource) {
this.dataSource = dataSource;
}
public OffRangePartitioner() {
super();
final GlobalProperties globalProperties = GlobalProperties.getInstance();
splitvalue = Double.parseDouble(globalProperties.getProperty("springbatch.part.splitvalue"));
}
@Override
public Map<String, ExecutionContext> partition(int threadSize) {
FormattedTraceHelper.formattedTrace(LOGGER,"Partition method in OffRangePartitioner class Start",INDENT_LEVEL, Level.INFO_INT);
final Session currentSession = HibernateUtil.getSessionFactory(HibernateConstants.DB2_DATABASE_NAME).getCurrentSession();
Query queryObj;
double count = 0.0;
final Transaction transaction = currentSession.beginTransaction();
queryObj = currentSession.createQuery(BatchConstants.PARTITION_CNT_QRY);
if (queryObj.iterate().hasNext()) {
count = Double.parseDouble(queryObj.iterate().next().toString());
}
int fromRow = 0;
int toRow = 0;
ExecutionContext context;
FormattedTraceHelper.formattedTrace(LOGGER,"Count of total records submitted for processing >> " + count, INDENT_LEVEL, Level.DEBUG_INT);
int gridSize = (int) Math.ceil(count / splitvalue);
FormattedTraceHelper.formattedTrace(LOGGER,"Total Grid size based on the count >> " + gridSize, INDENT_LEVEL, Level.DEBUG_INT);
Map<String, ExecutionContext> result = new HashMap<String, ExecutionContext>();
for (int threadCount = 1; threadCount <= gridSize; threadCount++) {
fromRow = toRow + 1;
if (threadCount == gridSize || gridSize == 1) {
toRow = (int) count;
} else {
toRow += splitvalue;
}
context = new ExecutionContext();
context.putInt("fromRow", fromRow);
context.putInt("toRow", toRow);
context.putString("name", "Processing Thread" + threadCount);
result.put("partition" + threadCount, context);
FormattedTraceHelper.formattedTrace(LOGGER, "Partition number >> "
+ threadCount + " from Row#: " + fromRow + " to Row#: "
+ toRow, INDENT_LEVEL, Level.DEBUG_INT);
}
if (transaction != null) {
transaction.commit();
}
FormattedTraceHelper.formattedTrace(LOGGER,
"Partition method in OffRangePartitioner class End",
INDENT_LEVEL, Level.INFO_INT);
return result;
}
}
Today I tested the same batch with 1056 records, with Spring Framework debug logging turned on. With page size 100, the query fired is:
SELECT * FROM (
SELECT CASE_NUM, CASE_STTS_CD, UPDT_TS,SBMT_OFC_CD, SBMT_OFC_NUM, DSTR_CHNL_CD,
APRV_OFC_CD, APRV_OFC_NUM,SBMT_TYP_CD, ROW_NUMBER() OVER(ORDER BY CASE_NUM) AS rownumber
FROM TCASE
WHERE SECARCH_PROC_IND = 'P'
) AS data
WHERE
rownumber BETWEEN :fromRow AND :toRow
ORDER BY
rownumber ASC
FETCH FIRST 100 ROWS ONLY
We update the SECARCH_PROC_IND = 'P' flag to 'C' once each record is processed. The main query partitions the records with ROW_NUMBER() over the rows where SECARCH_PROC_IND = 'P', so the row numbers shift as soon as any thread updates a flag from 'P' to 'C'. For example, once the first 100 rows have been flipped to 'C', the row that was originally number 201 gets row number 101, so the partition reading rows 101 to 200 now returns what were originally rows 201 to 300, and the original rows 101 to 200 are silently skipped.
It looks like this is the issue.

Spring Batch fires the query below to fetch the data from the database:
SELECT * FROM ( SELECT CASE_NUM, CASE_STTS_CD, UPDT_TS, SBMT_OFC_CD, SBMT_OFC_NUM, DSTR_CHNL_CD, APRV_OFC_CD, APRV_OFC_NUM, SBMT_TYP_CD, ROW_NUMBER() OVER(ORDER BY CASE_NUM) AS rownumber FROM TCASE WHERE SECARCH_PROC_IND = 'P' ) AS data WHERE rownumber BETWEEN :fromRow AND :toRow ORDER BY rownumber ASC FETCH FIRST 100 ROWS ONLY
After each row is processed, the flag SECARCH_PROC_IND = 'P' is updated to 'C'. Because SECARCH_PROC_IND is used in the WHERE clause, every update shrinks the result set and shifts the ROW_NUMBER values in the subsequent queries fired by Spring Batch. This is the root cause of the issue.
We introduced another column, SECARCH_PROC_TMP_IND, which we set to 'P' before batch processing in the beforeJob() method, and we use that column in the WHERE clause of the query instead of SECARCH_PROC_IND.
Once the batch has finished, we reset SECARCH_PROC_TMP_IND to NULL in afterJob().
This resolved the partitioning issue.
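For reference, a minimal sketch of what such a beforeJob()/afterJob() listener might look like, assuming a JdbcTemplate is wired in; only the table and column names come from the question, the rest (class name, exact SQL) is illustrative:
import org.springframework.batch.core.JobExecution;
import org.springframework.batch.core.JobExecutionListener;
import org.springframework.jdbc.core.JdbcTemplate;

public class TempFlagJobListener implements JobExecutionListener {

    private JdbcTemplate jdbcTemplate;

    public void setJdbcTemplate(JdbcTemplate jdbcTemplate) {
        this.jdbcTemplate = jdbcTemplate;
    }

    @Override
    public void beforeJob(JobExecution jobExecution) {
        // Snapshot the work set: copy the live flag into the temporary column so the
        // paging query sees stable ROW_NUMBER values for the whole run.
        jdbcTemplate.update(
            "UPDATE TCASE SET SECARCH_PROC_TMP_IND = 'P' WHERE SECARCH_PROC_IND = 'P'");
    }

    @Override
    public void afterJob(JobExecution jobExecution) {
        // Reset the temporary column once the run has finished.
        jdbcTemplate.update(
            "UPDATE TCASE SET SECARCH_PROC_TMP_IND = NULL WHERE SECARCH_PROC_TMP_IND IS NOT NULL");
    }
}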

Related

How to indicate the end of a CSV file using Spring Batch?

I have a CSV file that contains 4 lines.
The step executes normally, but it throws an error when it goes beyond the 4th line, and I have only 4 lines.
In my FieldSetMapper class I display the lines of the file:
public class BatchFieldSetMapper implements FieldSetMapper<Batch>{
@Override
public Batch mapFieldSet(FieldSet fieldSet) throws BindException {
Batch result = new Batch();
result.setInstitution(fieldSet.readString(0));
System.out.println("Institution ==> " + result.getInstitution());
result.setType(fieldSet.readString(1));
System.out.println("Type ==> " + result.getType());
result.setNom(fieldSet.readString(2));
System.out.println("Nom ==> " + result.getNom());
result.setRubrique(fieldSet.readString(3));
System.out.println("Rubrique ==> " + result.getRubrique());
result.setMontantPaye(fieldSet.readDouble(4));
System.out.println("MT P ==> " + result.getMontantPaye());
result.setMontantRetenu(fieldSet.readDouble(5));
System.out.println("MT R ==> " + result.getMontantRetenu());
return result;
}
}
And this error appears:
org.springframework.batch.item.file.FlatFileParseException: Parsing error at line: 5 in resource=[URL [file:C:/Temp/kk/xx.csv]], input=[;;;;;]
But I don't know how to indicate the end of the file; normally it should be detected automatically, shouldn't it?
PS: I upload the file using PrimeFaces as an UploadedFile, and I convert it with the method below to write it to a temporary file so that the batch can pick it up and apply the subsequent processing:
public void uploadFile(FileUploadEvent e) throws IOException{
UploadedFile uploadedCsv=e.getFile();
String filePath="C:/Temp/kk/xx.csv";
byte[] bytes=null;
if(uploadedCsv != null){
bytes=uploadedCsv.getContents();
BufferedOutputStream stream = new BufferedOutputStream(new FileOutputStream(new File(filePath)));
String filename = FilenameUtils.getName(uploadedCsv.getFileName());
stream.write(bytes);
stream.close();
}
ApplicationContext context = new ClassPathXmlApplicationContext("spring-batch-context.xml");
JobLauncher jobLauncher = (JobLauncher) context.getBean("jobLauncher");
Job job = (Job) context.getBean("batchJob");
try {
JobExecution execution = jobLauncher.run(job, new JobParameters());
} catch (JobExecutionException e1) {
System.out.println("Job Batch failed");
e1.printStackTrace();
}
}
And here is my spring-batch-context:
<!-- JobRepository and JobLauncher are configuration/setup classes -->
<bean id="jobRepository" class="org.springframework.batch.core.repository.support.MapJobRepositoryFactoryBean" />
<bean id="jobLauncher" class="org.springframework.batch.core.launch.support.SimpleJobLauncher">
<property name="jobRepository" ref="jobRepository" />
</bean>
<!-- à voir comment récuperer le nom du fichier et le mettre au value -->
<bean id="multiResourceItemReader" class="org.springframework.batch.item.file.MultiResourceItemReader">
<property name="resources" value="file:C:/Temp/kk/xx.csv" />
<property name="delegate" ref="flatFileItemReader" />
</bean>
<!-- ItemReader reads a complete line one by one from input file -->
<bean id="flatFileItemReader" class="org.springframework.batch.item.file.FlatFileItemReader" scope="step">
<property name="lineMapper">
<bean class="org.springframework.batch.item.file.mapping.DefaultLineMapper">
<property name="fieldSetMapper">
<!-- Mapper which maps each individual items in a record to properties in POJO -->
<bean class="ma.controle.gestion.springbatch.BatchFieldSetMapper" />
</property>
<property name="lineTokenizer">
<!-- A tokenizer class to be used when items in input record are separated by specific characters -->
<bean class="org.springframework.batch.item.file.transform.DelimitedLineTokenizer">
<property name="delimiter" value=";" />
</bean>
</property>
</bean>
</property>
</bean>
<!-- ItemWriter which writes data to database -->
<bean id="databaseItemWriter" class="org.springframework.batch.item.database.HibernateItemWriter">
<property name="sessionFactory" ref="sessionFactory" />
</bean>
<!-- Optional ItemProcessor to perform business logic/filtering on the input records -->
<bean id="itemProcessor" class="ma.controle.gestion.springbatch.BatchItemProcessor" />
<!-- Optional JobExecutionListener to perform business logic before and after the job -->
<bean id="jobListener" class="ma.controle.gestion.springbatch.BatchJobItemListener" />
<!-- Actual Job -->
<batch:job id="batchJob">
<batch:step id="step1">
<batch:tasklet transaction-manager="txManager">
<batch:chunk reader="multiResourceItemReader" writer="databaseItemWriter"
processor="itemProcessor" commit-interval="10" />
</batch:tasklet>
</batch:step>
<batch:listeners>
<batch:listener ref="jobListener" />
</batch:listeners>
</batch:job>
Your 5th line has only empty values (;;;;;), so your BatchFieldSetMapper fails on it and your reader throws the exception. Check your mapper for null/empty values; a defensive sketch is shown below.
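If the trailing line cannot simply be removed from the file, one option (a sketch, not the only fix) is to detect an all-empty record in the mapper and fail with a clearer message; the reader wraps such failures in FlatFileParseException, which can then be declared skippable (the same skippable-exception-classes / skip-limit mechanism shown in the first question above) if those lines should just be ignored:
import org.springframework.batch.item.file.mapping.FieldSetMapper;
import org.springframework.batch.item.file.transform.FieldSet;
import org.springframework.validation.BindException;

public class BatchFieldSetMapper implements FieldSetMapper<Batch> {

    @Override
    public Batch mapFieldSet(FieldSet fieldSet) throws BindException {
        // Guard against trailing "empty" records such as ";;;;;" where every token is blank.
        boolean allBlank = true;
        for (String value : fieldSet.getValues()) {
            if (value != null && !value.trim().isEmpty()) {
                allBlank = false;
                break;
            }
        }
        if (allBlank) {
            throw new IllegalArgumentException("Blank record encountered: " + fieldSet);
        }

        Batch result = new Batch();
        result.setInstitution(fieldSet.readString(0));
        result.setType(fieldSet.readString(1));
        result.setNom(fieldSet.readString(2));
        result.setRubrique(fieldSet.readString(3));
        result.setMontantPaye(fieldSet.readDouble(4));
        result.setMontantRetenu(fieldSet.readDouble(5));
        return result;
    }
}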

Spring Batch - Last item from the reader alone is getting updated

I have to read from a flat file and update a column if an ID present in the file matches the ID in the column. The file is being read properly, but only the last ID value is getting updated. Please find the snippets below.
Job-Config.xml
<bean id="abcitemReader" class="org.springframework.batch.item.file.FlatFileItemReader" scope="step">
<property name="resource" value="file:datafile/outputs/ibdData.txt" />
<property name="lineMapper">
<bean class="org.springframework.batch.item.file.mapping.DefaultLineMapper">
<property name="lineTokenizer">
<bean class="org.springframework.batch.item.file.transform.DelimitedLineTokenizer">
<property name="names" value="ID,NAM,TYPE" />
<property name="delimiter" value="|"/>
</bean>
</property>
<property name="fieldSetMapper">
<bean class="com.pershing.intraware.springbatch.mapper.abcFieldsetMapper" />
</property>
</bean>
</property>
</bean>
<bean id="abcitemWriter" class="org.springframework.batch.item.database.JdbcBatchItemWriter" scope="step">
<property name="dataSource" ref="dataSource" />
<property name="sql"><value>UPDATE TEST_abc SET BIZ_ARNG_CD = CASE WHEN ID IN (SELECT ID FROM TEST_abc WHERE ID= ? and MONTH=(to_char(sysdate, 'MM')) AND YR =(to_char(sysdate, 'YY'))) THEN 'Y' ELSE 'N' END</value></property>
<!-- It will take care matching between object property and sql name parameter -->
<property name="itemPreparedStatementSetter" ref="testPrepStatementSetter" />
</bean>
</beans>
Setter.java
public class IDItemPreparedStatementSetter implements ItemPreparedStatementSetter<Test> {
@Override
public void setValues(Test item, PreparedStatement ps) throws SQLException {
// TODO Auto-generated method stub
ps.setString(1, item.getID());
}
}
Your query updates every row of the table each time it is fired; you need to restrict that. Currently it sets BIZ_ARNG_CD to 'Y' for records whose ID equals the ID of the last record passed to the writer, and to 'N' for everything else, so only the last item's update survives.
You can fix this in two ways (a sketch combining both is shown below):
Default the database column to 'N' and don't set it to 'N' in the update statement.
Add a WHERE clause to the update statement (e.g. BIZ_ARNG_CD != 'Y').
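A sketch combining both suggestions, using a Java-configured JdbcBatchItemWriter for illustration (the XML equivalent works the same way); the table and column names come from the question and the exact predicate may need adjusting to your data model:
import javax.sql.DataSource;
import org.springframework.batch.item.database.JdbcBatchItemWriter;

public class AbcWriterConfig {
    // Option 1: default BIZ_ARNG_CD to 'N' in the schema and only flip matching rows to 'Y'.
    // Option 2: restrict the UPDATE with a WHERE clause so it no longer touches every row.
    public JdbcBatchItemWriter<Test> abcItemWriter(DataSource dataSource) throws Exception {
        JdbcBatchItemWriter<Test> writer = new JdbcBatchItemWriter<>();
        writer.setDataSource(dataSource);
        writer.setSql("UPDATE TEST_abc SET BIZ_ARNG_CD = 'Y' "
                + "WHERE ID = ? "
                + "AND MONTH = to_char(sysdate, 'MM') "
                + "AND YR = to_char(sysdate, 'YY') "
                + "AND BIZ_ARNG_CD != 'Y'");
        writer.setItemPreparedStatementSetter(new IDItemPreparedStatementSetter());
        writer.afterPropertiesSet();
        return writer;
    }
}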

Why is Spring Batch executing as a singleton instead of multithreaded?

I am invoking a Spring Batch job through the Quartz scheduler, and it should run every minute.
When the job runs the first time, the ItemReader is opened successfully and the job runs. However, when the job attempts to run a second time, it uses the same reader instance as the first run, which is already initialized, and fails with "java.lang.IllegalStateException: Stream is already initialized. Close before re-opening." I have set scope="step" for both the item reader and the item writer.
Please let me know if I am doing anything wrong in the configuration.
<?xml version="1.0" encoding="UTF-8"?>
<import resource="context.xml"/>
<import resource="database.xml"/>
<bean id="MyPartitioner" class="com.MyPartitioner" />
<bean id="itemProcessor" class="com.MyProcessor" scope="step" />
<bean id="itemReader" class="com.MyItemReader" scope="step">
<property name="dataSource" ref="dataSource"/>
<property name="sql" value="query...."/>
<property name="rowMapper">
<bean class="com.MyRowMapper" scope="step"/>
</property>
</bean>
<job id="MyJOB" xmlns="http://www.springframework.org/schema/batch">
<step id="masterStep">
<partition step="slave" partitioner="MyPartitioner">
<handler grid-size="10" task-executor="taskExecutor"/>
</partition>
</step>
</job>
<step id="slave" xmlns="http://www.springframework.org/schema/batch">
<tasklet>
<chunk reader="itemReader" writer="mysqlItemWriter" processor="itemProcessor" commit-interval="100"/>
</tasklet>
</step>
<bean id="taskExecutor" class="org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor">
<property name="corePoolSize" value="20"/>
<property name="maxPoolSize" value="20"/>
<property name="allowCoreThreadTimeOut" value="true"/>
</bean>
<bean id="mysqlItemWriter" class="com.MyItemWriter" scope="step">
<property name="dataSource" ref="dataSource"/>
<property name="sql">
<value>
<![CDATA[
query.....
]]>
</value>
</property>
<property name="itemPreparedStatementSetter">
<bean class="com.MyPreparedStatementSetter" scope="step"/>
</property>
</bean>
Quartz job invoker-
Scheduler scheduler = new StdSchedulerFactory("quartz.properties").getScheduler();
JobKey jobKey = new JobKey("QUARTZJOB", "QUARTZJOB");
JobDetail jobDetail = JobBuilder.newJob(MySpringJobInvoker.class).withIdentity(jobKey).build();
jobDetail.getJobDataMap().put("jobName", "SpringBatchJob");
SimpleTrigger smplTrg = newTrigger().withIdentity("QUARTZJOB", "QUARTZJOB").startAt(new Date(startTime))
.withSchedule(simpleSchedule().withIntervalInSeconds(frequency).withRepeatCount(repeatCnt))
.forJob(jobDetail).withPriority(5).build();
scheduler.scheduleJob(jobDetail, smplTrg);
Quartz job -
public class MySpringJobInvoker implements Job
{
@Override
public void execute(JobExecutionContext jobExecutionContext) throws JobExecutionException
{
JobDataMap data = jobExecutionContext.getJobDetail().getJobDataMap();
ApplicationContext applicationContext =ApplicationContextUtil.getInstance();
JobLauncher jobLauncher = (JobLauncher) applicationContext.getBean("jobLauncher");
org.springframework.batch.core.Job job = (org.springframework.batch.core.Job) applicationContext.getBean(data.getString("jobName"));
JobParameters param = new JobParametersBuilder().addString("myparam", "myparam").addString(Long.toString(System.currentTimeMillis()), Long.toString(System.currentTimeMillis())).toJobParameters();
JobExecution execution = jobLauncher.run(job, param);
}
}
Singletonclass -
public class ApplicationContextUtil
{
private static ApplicationContext applicationContext;
public static synchronized ApplicationContext getInstance()
{
if(applicationContext == null)
{
applicationContext = new ClassPathXmlApplicationContext("myjob.xml");
}
return applicationContext;
}
}
What parameters are you passing to the Spring Batch job from Quartz? Could you post the exception stack trace?
If you are trying to execute a second instance of the batch with the same parameters, it won't work: Spring Batch identifies a unique instance of the job by the parameters passed, so every new run of the job requires different parameters (see the sketch below).
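A minimal sketch of what that means in code (the class name is made up; the question's invoker already attempts something similar with its timestamp string parameter):
import org.springframework.batch.core.Job;
import org.springframework.batch.core.JobExecution;
import org.springframework.batch.core.JobParameters;
import org.springframework.batch.core.JobParametersBuilder;
import org.springframework.batch.core.launch.JobLauncher;

public class UniqueParamsLauncher {
    // Give every scheduled launch an identifying parameter (here a timestamp) so each
    // run creates a new JobInstance instead of colliding with the previous one.
    public JobExecution launch(JobLauncher jobLauncher, Job job) throws Exception {
        JobParameters params = new JobParametersBuilder()
                .addString("myparam", "myparam")
                .addLong("run.timestamp", System.currentTimeMillis()) // unique per trigger firing
                .toJobParameters();
        return jobLauncher.run(job, params);
    }
}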

How to make GridCacheStore-backed get() store its result in the cache

When I use a GridCacheStore-backed cache, the first get() may take a long time, because internally the GridCacheStore will perform calculations / slow search etc.
The second get() on that node will be quick because the result has been cached by that node.
However, a get() on another node is still slow because the result is not replicated. How can I make it replicate? I've set cacheMode=REPLICATED.
My config:
<bean parent="cache-template">
<property name="name" value="yagoEntityByLabel" />
<property name="cacheMode" value="REPLICATED" />
<property name="atomicityMode" value="ATOMIC" />
<property name="distributionMode" value="NEAR_PARTITIONED" />
<property name="backups" value="1" />
<property name="store">
<bean class="id.ac.itb.ee.lskk.lumen.core.yago.YagoEntityByLabelCacheStore" />
</property>
<property name="swapEnabled" value="false" />
<property name="evictionPolicy">
<bean class="org.gridgain.grid.cache.eviction.lru.GridCacheLruEvictionPolicy">
<property name="maxSize" value="10000" />
</bean>
</property>
</bean>
The workaround is to not use the GridCacheStore-backed cache but to use put() instead, but that means a lot of typing and it's not atomic, since the logic would be:
@Nullable Object value = cache.get(key);
if (value == null) {
value = calculateHeavily(key);
cache.put(key, value);
}
GridGain (like most other data grids) does not replicate on a get operation; it simply loads the data into the local cache. Your workaround using put() is good.
To make it atomic (if you need that), you can wrap your code in a transaction, like so:
try (GridCacheTx tx = cache.txStart()) {
@Nullable Object value = cache.get(key);
if (value == null) {
value = calculateHeavily(key);
cache.put(key, value);
}
tx.commit();
}

DataSource initialization at server startup

We have an application where we use Spring for IoC. We have the dataSource bean configured in applicationContext.xml, and it is referenced in other bean definitions.
The dataSource bean definition looks like:
<bean id="dbDataSource" class="org.apache.commons.dbcp.BasicDataSource"
destroy-method="close">
<property name="driverClassName" value="oracle.jdbc.driver.OracleDriver" />
<property name="url"
value="jdbc:oracle:oci:#TESTDB" />
<property name="username" value="TESTUSER" />
<property name="password" value="TESTPWD" />
<property name="initialSize" value="50" />
<property name="maxActive" value="40" />
<property name="maxIdle" value="10" />
<property name="minIdle" value="10" />
<property name="maxWait" value="-1" />
</bean>
<bean id="serviceDAO" class="com.test.impl.ServiceDAOImpl">
<property name="dataSource" ref="dbDataSource" />
</bean>
ServiceDAOImpl looks as follows:
public class ServiceDAOImpl implements ServiceDAO {
private JdbcTemplate jdbcTemplate;
public void setDataSource(DataSource dataSource) {
this.jdbcTemplate = new JdbcTemplate(dataSource);
}
#SuppressWarnings({ "rawtypes", "unchecked" })
public ValueObj readValue(String key) {
String query = "SELECT * FROM SERVICE_LOOKUP WHERE KEY=?";
/**
* Implement the RowMapper callback interface
*/
return (ValueObj) jdbcTemplate.queryForObject(query,
new Object[] { key }, new RowMapper() {
public Object mapRow(ResultSet resultSet, int rowNum)
throws SQLException {
return new ValueObj(resultSet.getString("KEY"),
resultSet.getString("VALUE"));
}
});
}
public ServiceDAOImpl() {
}
}
Now, at server startup the injection happens fine, and when we use the dataSource in ServiceDAOImpl the connection works fine. But the very first time a database call is made, it takes around 3 minutes to get the response back. I think this is because the pool is created during that first call, and we have set initialSize=50 in applicationContext.xml.
So, to avoid this, we need a way for the pool to be created during application startup itself so it can be used directly.
Please suggest, and let me know if any clarification is required.
Regards
Saroj
There's a workaround for this. You could force the JdbcTemplate to use a DB connection at startup. See the link here for a detailed explanation.
<bean id="jdbcTemplate" class="org.springframework.jdbc.core.JdbcTemplate">
<constructor-arg index="0" ref="dataSource"/>
<constructor-arg index="1" value="false"/>
</bean>
The second constructor-arg is the lazyInit flag.
Aravind A's solution is the preferred one, but in case you don't want to define an extra bean you can point Spring to your DAO's init method:
<bean id="serviceDAO" class="com.test.impl.ServiceDAOImpl" init-method="init">
<property name="dataSource" ref="dbDataSource" />
</bean>
and then define ServiceDAOImpl.init(), which runs some SQL such as SELECT 1 FROM SERVICE_LOOKUP LIMIT 1, or even better a no-op like SELECT 1:
public class ServiceDAOImpl implements ServiceDAO {
    private JdbcTemplate jdbcTemplate;
    public void setDataSource(DataSource dataSource) {
        this.jdbcTemplate = new JdbcTemplate(dataSource);
    }
    public void init() {
        // Touch the database once at startup so the connection pool is created eagerly.
        String query = "SELECT 1 FROM SERVICE_LOOKUP LIMIT 1";
        int i = jdbcTemplate.queryForInt(query);
    }
}
