Quartz doesn't recognize schema job_scheduling_data_2_0.xsd present in quartz jar file - spring

I am getting the below exception on server startup.
I am using Quartz 2.2.21 with Spring 3.2.
I have enabled the Quartz XML plugin (org.quartz.plugins.xml.XMLSchedulingDataProcessorPlugin).
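For reference, the plugin is enabled through quartz.properties entries along these lines (the plugin name "jobInitializer" and the data file name here are representative, not copied exactly from my setup):
org.quartz.plugin.jobInitializer.class = org.quartz.plugins.xml.XMLSchedulingDataProcessorPlugin
org.quartz.plugin.jobInitializer.fileNames = quartz_job_data.xml
org.quartz.plugin.jobInitializer.failOnFileNotFound = true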
Please find below the start tag of our XML file:
During server startup we get the following log information and stack trace:
Error Message:
Unable to load local schema packaged in quartz distribution jar. Utilizing schema online at http://www.quartz-scheduler.org/xml/job_scheduling_data_2_0.xsd
Exception:
Caused by: org.xml.sax.SAXParseException; systemId: file:///quartz_job_data.xml; lineNumber: 5; columnNumber: 104;
schema_reference.4: Failed to read schema document 'http://www.quartz-scheduler.org/xml/job_scheduling_data_2_0.xsd', because 1) could not find the document; 2) the document could not be read; 3) the root element of the document is not <xsd:schema>.

I have the same problem. I'm using JBoss 7.1.1 and the problem appears when there is no internet connection. It is easy to reproduce by pointing quartz-scheduler.org at a fake, unreachable address in the hosts file.
I tried to force the use of the local copy, but it does not work.
What I finally did is to partially override the functionality until this is fixed. See: https://jira.spring.io/browse/SPR-13706
public class CustomXMLSchedulingDataProcessor extends org.quartz.xml.XMLSchedulingDataProcessor {
public static final String QUARTZ_XSD_PATH_IN_JAR_CLASSPATH = "classpath:org/quartz/xml/job_scheduling_data_2_0.xsd";
public CustomXMLSchedulingDataProcessor(ClassLoadHelper clh) throws ParserConfigurationException {
super(clh);
}
@Override
protected Object resolveSchemaSource() {
InputSource inputSource;
InputStream is = null;
try {
is = classLoadHelper.getResourceAsStream(QUARTZ_XSD_PATH_IN_JAR_CLASSPATH);
} finally {
if (is != null) {
inputSource = new InputSource(is);
inputSource.setSystemId(QUARTZ_SCHEMA_WEB_URL);
}
else {
return QUARTZ_SCHEMA_WEB_URL;
}
}
return inputSource;
}
}
And I wrote a new plugin, XMLSchedulingDataProcessorPlugin, overriding just the instantiation of the above class.
public class XMLSchedulingDataProcessorPlugin
extends SchedulerPluginWithUserTransactionSupport
implements FileScanListener {
/*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* Data members.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
private static final int MAX_JOB_TRIGGER_NAME_LEN = 80;
private static final String JOB_INITIALIZATION_PLUGIN_NAME = "JobSchedulingDataLoaderPlugin";
private static final String FILE_NAME_DELIMITERS = ",";
private boolean failOnFileNotFound = true;
private String fileNames = CustomXMLSchedulingDataProcessor.QUARTZ_XML_DEFAULT_FILE_NAME;
// Populated by initialization
private Map<String, JobFile> jobFiles = new LinkedHashMap<String, JobFile>();
private long scanInterval = 0;
boolean started = false;
protected ClassLoadHelper classLoadHelper = null;
private Set<String> jobTriggerNameSet = new HashSet<String>();
/*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* Constructors.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
public XMLSchedulingDataProcessorPlugin() {
}
/*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* Interface.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
/**
* Comma separated list of file names (with paths) to the XML files that should be read.
*/
public String getFileNames() {
return fileNames;
}
/**
* The file name (and path) to the XML file that should be read.
*/
public void setFileNames(String fileNames) {
this.fileNames = fileNames;
}
/**
* The interval (in seconds) at which to scan for changes to the file.
* If the file has been changed, it is re-loaded and parsed. The default
* value for the interval is 0, which disables scanning.
*
* @return Returns the scanInterval.
*/
public long getScanInterval() {
return scanInterval / 1000;
}
/**
* The interval (in seconds) at which to scan for changes to the file.
* If the file has been changed, it is re-loaded and parsed. The default
* value for the interval is 0, which disables scanning.
*
* @param scanInterval The scanInterval to set.
*/
public void setScanInterval(long scanInterval) {
this.scanInterval = scanInterval * 1000;
}
/**
* Whether or not initialization of the plugin should fail (throw an
* exception) if the file cannot be found. Default is <code>true</code>.
*/
public boolean isFailOnFileNotFound() {
return failOnFileNotFound;
}
/**
* Whether or not initialization of the plugin should fail (throw an
* exception) if the file cannot be found. Default is <code>true</code>.
*/
public void setFailOnFileNotFound(boolean failOnFileNotFound) {
this.failOnFileNotFound = failOnFileNotFound;
}
/*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* SchedulerPlugin Interface.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
/**
* <p>
* Called during creation of the <code>Scheduler</code> in order to give
* the <code>SchedulerPlugin</code> a chance to initialize.
* </p>
*
* @throws org.quartz.SchedulerConfigException
* if there is an error initializing.
*/
public void initialize(String name, final Scheduler scheduler, ClassLoadHelper schedulerFactoryClassLoadHelper)
throws SchedulerException {
super.initialize(name, scheduler);
this.classLoadHelper = schedulerFactoryClassLoadHelper;
getLog().info("Registering Quartz Job Initialization Plug-in.");
// Create JobFile objects
StringTokenizer stok = new StringTokenizer(fileNames, FILE_NAME_DELIMITERS);
while (stok.hasMoreTokens()) {
final String fileName = stok.nextToken();
final JobFile jobFile = new JobFile(fileName);
jobFiles.put(fileName, jobFile);
}
}
@Override
public void start(UserTransaction userTransaction) {
try {
if (jobFiles.isEmpty() == false) {
if (scanInterval > 0) {
getScheduler().getContext().put(JOB_INITIALIZATION_PLUGIN_NAME + '_' + getName(), this);
}
Iterator<JobFile> iterator = jobFiles.values().iterator();
while (iterator.hasNext()) {
JobFile jobFile = iterator.next();
if (scanInterval > 0) {
String jobTriggerName = buildJobTriggerName(jobFile.getFileBasename());
TriggerKey tKey = new TriggerKey(jobTriggerName, JOB_INITIALIZATION_PLUGIN_NAME);
// remove pre-existing job/trigger, if any
getScheduler().unscheduleJob(tKey);
JobDetail job = newJob().withIdentity(jobTriggerName, JOB_INITIALIZATION_PLUGIN_NAME).ofType(FileScanJob.class)
.usingJobData(FileScanJob.FILE_NAME, jobFile.getFileName())
.usingJobData(FileScanJob.FILE_SCAN_LISTENER_NAME, JOB_INITIALIZATION_PLUGIN_NAME + '_' + getName())
.build();
SimpleTrigger trig = newTrigger().withIdentity(tKey).withSchedule(
simpleSchedule().repeatForever().withIntervalInMilliseconds(scanInterval))
.forJob(job)
.build();
getScheduler().scheduleJob(job, trig);
getLog().debug("Scheduled file scan job for data file: {}, at interval: {}", jobFile.getFileName(), scanInterval);
}
processFile(jobFile);
}
}
} catch(SchedulerException se) {
getLog().error("Error starting background-task for watching jobs file.", se);
} finally {
started = true;
}
}
/**
* Helper method for generating unique job/trigger name for the
* file scanning jobs (one per FileJob). The unique names are saved
* in jobTriggerNameSet.
*/
private String buildJobTriggerName(
String fileBasename) {
// Name w/o collisions will be prefix + _ + filename (with '.' of filename replaced with '_')
// For example: JobInitializationPlugin_jobInitializer_myjobs_xml
String jobTriggerName = JOB_INITIALIZATION_PLUGIN_NAME + '_' + getName() + '_' + fileBasename.replace('.', '_');
// If name is too long (DB column is 80 chars), then truncate to max length
if (jobTriggerName.length() > MAX_JOB_TRIGGER_NAME_LEN) {
jobTriggerName = jobTriggerName.substring(0, MAX_JOB_TRIGGER_NAME_LEN);
}
// Make sure this name is unique in case the same file name under different
// directories is being checked, or had a naming collision due to length truncation.
// If there is a conflict, keep incrementing a _# suffix on the name (being sure
// not to get too long), until we find a unique name.
int currentIndex = 1;
while (jobTriggerNameSet.add(jobTriggerName) == false) {
// If not our first time through, then strip off old numeric suffix
if (currentIndex > 1) {
jobTriggerName = jobTriggerName.substring(0, jobTriggerName.lastIndexOf('_'));
}
String numericSuffix = "_" + currentIndex++;
// If the numeric suffix would make the name too long, then make room for it.
if (jobTriggerName.length() > (MAX_JOB_TRIGGER_NAME_LEN - numericSuffix.length())) {
jobTriggerName = jobTriggerName.substring(0, (MAX_JOB_TRIGGER_NAME_LEN - numericSuffix.length()));
}
jobTriggerName += numericSuffix;
}
return jobTriggerName;
}
/**
* Overridden to ignore <em>wrapInUserTransaction</em> because shutdown()
* does not interact with the <code>Scheduler</code>.
*/
@Override
public void shutdown() {
// Since we have nothing to do, override base shutdown so we don't
// get extraneous UserTransactions.
}
private void processFile(JobFile jobFile) {
if (jobFile == null || !jobFile.getFileFound()) {
return;
}
try {
CustomXMLSchedulingDataProcessor processor =
new CustomXMLSchedulingDataProcessor(this.classLoadHelper);
processor.addJobGroupToNeverDelete(JOB_INITIALIZATION_PLUGIN_NAME);
processor.addTriggerGroupToNeverDelete(JOB_INITIALIZATION_PLUGIN_NAME);
processor.processFileAndScheduleJobs(
jobFile.getFileName(),
jobFile.getFileName(), // systemId
getScheduler());
} catch (Exception e) {
getLog().error("Error scheduling jobs: " + e.getMessage(), e);
}
}
public void processFile(String filePath) {
processFile((JobFile)jobFiles.get(filePath));
}
/**
* @see org.quartz.jobs.FileScanListener#fileUpdated(java.lang.String)
*/
public void fileUpdated(String fileName) {
if (started) {
processFile(fileName);
}
}
class JobFile {
private String fileName;
// These are set by initialize()
private String filePath;
private String fileBasename;
private boolean fileFound;
protected JobFile(String fileName) throws SchedulerException {
this.fileName = fileName;
initialize();
}
protected String getFileName() {
return fileName;
}
protected boolean getFileFound() {
return fileFound;
}
protected String getFilePath() {
return filePath;
}
protected String getFileBasename() {
return fileBasename;
}
private void initialize() throws SchedulerException {
InputStream f = null;
try {
String furl = null;
File file = new File(getFileName()); // files in filesystem
if (!file.exists()) {
URL url = classLoadHelper.getResource(getFileName());
if(url != null) {
try {
furl = URLDecoder.decode(url.getPath(), "UTF-8");
} catch (UnsupportedEncodingException e) {
furl = url.getPath();
}
file = new File(furl);
try {
f = url.openStream();
} catch (IOException ignor) {
// Swallow the exception
}
}
} else {
try {
f = new java.io.FileInputStream(file);
}catch (FileNotFoundException e) {
// ignore
}
}
if (f == null) {
if (isFailOnFileNotFound()) {
throw new SchedulerException(
"File named '" + getFileName() + "' does not exist.");
} else {
getLog().warn("File named '" + getFileName() + "' does not exist.");
}
} else {
fileFound = true;
}
filePath = (furl != null) ? furl : file.getAbsolutePath();
fileBasename = file.getName();
} finally {
try {
if (f != null) {
f.close();
}
} catch (IOException ioe) {
getLog().warn("Error closing jobs file " + getFileName(), ioe);
}
}
}
}
}
That way you only have to use this plugin in your configuration and everything will work by default.
org.quartz.plugin.jobInitializer.class =
com.level2.quartz.processor.plugin.XMLSchedulingDataProcessorPlugin
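The scan and failure behaviour can be configured the same way, since Quartz populates the plugin's setters from quartz.properties; for example (the file name and interval are just examples):
org.quartz.plugin.jobInitializer.fileNames = quartz_job_data.xml
org.quartz.plugin.jobInitializer.scanInterval = 120
org.quartz.plugin.jobInitializer.failOnFileNotFound = true
These map onto setFileNames, setScanInterval and setFailOnFileNotFound in the plugin class above.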

Related

Set Request Not Working SNMP4j

I have searched everywhere but haven't found a solution to my problem. I've tried all the solutions I could find, including changing the community name and using different set classes, and I still can't get the SET to work with SNMP4J. The code executes fine and the agent can be queried from the iReasoning MIB browser, but the value of the OID does not change after the set executes. The SNMP classes and tester classes are below.
SNMPManager code:
public class SNMPManager {
Snmp snmp = null;
String address = null;
/**
* Constructor
*
* @param add
*/
public SNMPManager(String add) {
address = add;
}
public static void main(String[] args) throws IOException {
/**
* Port 161 is used for read and other operations; port 162 is used for trap
* generation
*/
SNMPManager client = new SNMPManager("udp:127.0.0.1/161");
client.start();
/**
* OID - .1.3.6.1.2.1.1.1.0 => sysDescr; OID - .1.3.6.1.2.1.1.5.0 => sysName. A MIB
* explorer will be useful here, as discussed in the previous article.
*/
String sysDescr = client.getAsString(new OID(".1.3.6.1.2.1.1.1.0"));
System.out.println(sysDescr);
}
/**
* Start the Snmp session. If you forget the listen() method you will not get
* any answers because the communication is asynchronous and the listen() method
* listens for answers.
*
* @throws IOException
*/
public void start() throws IOException {
TransportMapping<?> transport = new DefaultUdpTransportMapping();
snmp = new Snmp(transport);
// Do not forget this line!
transport.listen();
}
/**
* Method which takes a single OID and returns the response from the agent as a
* String.
*
* @param oid
* @return
* @throws IOException
*/
public String getAsString(OID oid) throws IOException {
ResponseEvent event = get(new OID[] { oid });
return event.getResponse().get(0).getVariable().toString();
}
/**
* This method is capable of handling multiple OIDs
*
* @param oids
* @return
* @throws IOException
*/
public ResponseEvent get(OID oids[]) throws IOException {
PDU pdu = new PDU();
for (OID oid : oids) {
pdu.add(new VariableBinding(oid));
}
pdu.setType(PDU.GET);
ResponseEvent event = snmp.send(pdu, getTarget(), null);
if (event != null) {
return event;
}
throw new RuntimeException("GET timed out");
}
public ResponseEvent set(OID oid, String val) throws IOException {
PDU pdu = new PDU();
VariableBinding varBind = new VariableBinding(oid, new OctetString(val));
pdu.add(varBind);
pdu.setType(PDU.SET);
//pdu.setRequestID(new Integer32(1));
Target target = getTarget();
ResponseEvent event = snmp.set(pdu, target);
if (event != null) {
System.out.println("\nResponse:\nGot Snmp Set Response from Agent");
System.out.println("Snmp Set Request = " + event.getRequest().getVariableBindings());
PDU responsePDU = event.getResponse();
System.out.println("\nresponsePDU = " + responsePDU);
if (responsePDU != null) {
int errorStatus = responsePDU.getErrorStatus();
int errorIndex = responsePDU.getErrorIndex();
String errorStatusText = responsePDU.getErrorStatusText();
System.out.println("\nresponsePDU = " + responsePDU);
if (errorStatus == PDU.noError) {
System.out.println("Snmp Set Response = " + responsePDU.getVariableBindings());
} else {
System.out.println("errorStatus = " + responsePDU);
System.out.println("Error: Request Failed");
System.out.println("Error Status = " + errorStatus);
System.out.println("Error Index = " + errorIndex);
System.out.println("Error Status Text = " + errorStatusText);
}
}
return event;
}
throw new RuntimeException("SET timed out");
}
/**
* This method returns a Target, which contains information about where the data
* should be fetched and how.
*
* @return
*/
private Target getTarget() {
Address targetAddress = GenericAddress.parse(address);
CommunityTarget target = new CommunityTarget();
target.setCommunity(new OctetString("public"));
target.setAddress(targetAddress);
target.setRetries(2);
target.setTimeout(1500);
target.setVersion(SnmpConstants.version2c);
return target;
}
}
SNMPAgent code:
public class SNMPAgent extends BaseAgent {
private String address;
/**
*
* @param address
* @throws IOException
*/
public SNMPAgent(String address) throws IOException {
/**
* Creates a base agent with boot-counter, config file, and a CommandProcessor
* for processing SNMP requests. Parameters: "bootCounterFile" - a file with
* serialized boot-counter information (read/write). If the file does not exist
* it is created on shutdown of the agent. "configFile" - a file with serialized
* configuration information (read/write). If the file does not exist it is
* created on shutdown of the agent. "commandProcessor" - the CommandProcessor
* instance that handles the SNMP requests.
*/
super(new File("conf.agent"), new File("bootCounter.agent"),
new CommandProcessor(new OctetString(MPv3.createLocalEngineID())));
this.address = address;
}
/**
* Adds community to security name mappings needed for SNMPv1 and SNMPv2c.
*/
@Override
protected void addCommunities(SnmpCommunityMIB communityMIB) {
Variable[] com2sec = new Variable[] { new OctetString("public"), new OctetString("cpublic"), // security name
getAgent().getContextEngineID(), // local engine ID
new OctetString("public"), // default context name
new OctetString(), // transport tag
new Integer32(StorageType.nonVolatile), // storage type
new Integer32(RowStatus.active) // row status
};
MOTableRow<?> row = communityMIB.getSnmpCommunityEntry()
.createRow(new OctetString("public2public").toSubIndex(true), com2sec);
communityMIB.getSnmpCommunityEntry().addRow((SnmpCommunityEntryRow) row);
}
/**
* Adds initial notification targets and filters.
*/
@Override
protected void addNotificationTargets(SnmpTargetMIB arg0, SnmpNotificationMIB arg1) {
// TODO Auto-generated method stub
}
/**
* Adds all the necessary initial users to the USM.
*/
@Override
protected void addUsmUser(USM arg0) {
// TODO Auto-generated method stub
}
/**
* Adds initial VACM configuration.
*/
@Override
protected void addViews(VacmMIB vacm) {
vacm.addGroup(SecurityModel.SECURITY_MODEL_SNMPv2c, new OctetString("cpublic"), new OctetString("v1v2group"),
StorageType.nonVolatile);
vacm.addAccess(new OctetString("v1v2group"), new OctetString("public"), SecurityModel.SECURITY_MODEL_ANY,
SecurityLevel.NOAUTH_NOPRIV, MutableVACM.VACM_MATCH_EXACT, new OctetString("fullReadView"),
new OctetString("fullWriteView"), new OctetString("fullNotifyView"), StorageType.nonVolatile);
vacm.addViewTreeFamily(new OctetString("fullReadView"), new OID("1.3"), new OctetString(),
VacmMIB.vacmViewIncluded, StorageType.nonVolatile);
}
/**
* Unregister the basic MIB modules from the agent's MOServer.
*/
@Override
protected void unregisterManagedObjects() {
// TODO Auto-generated method stub
}
/**
* Register additional managed objects at the agent's server.
*/
@Override
protected void registerManagedObjects() {
// TODO Auto-generated method stub
}
@SuppressWarnings("unchecked")
protected void initTransportMappings() throws IOException {
transportMappings = new TransportMapping[1];
Address addr = GenericAddress.parse(address);
TransportMapping<?> tm = TransportMappings.getInstance().createTransportMapping(addr);
transportMappings[0] = tm;
}
/**
* Start method invokes some initialization methods needed to start the agent
*
* @throws IOException
*/
public void start() throws IOException {
init();
// This method reads some old config from a file and causes
// unexpected behavior.
// loadConfig(ImportModes.REPLACE_CREATE);
addShutdownHook();
getServer().addContext(new OctetString("public"));
finishInit();
run();
sendColdStartNotification();
}
/**
* Clients can register the MO they need
*/
public void registerManagedObject(ManagedObject mo) {
try {
server.register(mo, null);
} catch (DuplicateRegistrationException ex) {
throw new RuntimeException(ex);
}
}
public void unregisterManagedObject(MOGroup moGroup) {
moGroup.unregisterMOs(server, getContext(moGroup));
}
}
MOCreator code:
public class MOCreator {
public static MOScalar<Variable> createReadOnly(OID oid,Object value ){
return new MOScalar<Variable>(oid,
MOAccessImpl.ACCESS_READ_WRITE,
getVariable(value));
}
private static Variable getVariable(Object value) {
if(value instanceof String) {
return new OctetString((String)value);
}
throw new IllegalArgumentException("Unmanaged Type: " + value.getClass());
}
}
Tester class:
public class TestSNMPAgent {
String siteOIDNumber = "1";
String dataOIDNumber = "1";
String sensorOIDNumber = "1";
OID newOIDdata = new OID("1.3.6.1.4.1.1234.5." + siteOIDNumber + ".2." + dataOIDNumber + "." + sensorOIDNumber + ".0");
OID newOIDdata2 = new OID("1.3.6.1.4.1.1234.5.1.2.1.2.0");
OID newOIDdata3 = new OID("1.3.6.1.4.1.1234.5.1.2.1.3.0");
public static void main(String[] args) throws IOException {
TestSNMPAgent client = new TestSNMPAgent("udp:127.0.0.1/161");
client.init();
}
SNMPAgent agent = null;
/**
* This is the client which we have created earlier
*/
SNMPManager client = null;
String address = null;
/**
* Constructor
*
* @param add
*/
public TestSNMPAgent(String add) {
address = add;
}
/**
* Initiates the testing of the SNMP Agent.
* @throws IOException
*/
private void init() throws IOException {
/*agent = new SNMPAgent("172.21.1.103/2001");*/
agent = new SNMPAgent("172.21.1.103/2010");
agent.start();
// Since BaseAgent registers some MIBs by default we need to unregister
// one before we register our own sysDescr. Normally you would
// override that method and register the MIBs that you need
agent.unregisterManagedObject(agent.getSnmpv2MIB());
//agent.registerManagedObject(MOCreator.createReadOnly(sysDescr,"This Description is set By KGrewe"));
agent.registerManagedObject(MOCreator.createReadOnly(newOIDdata, "50"));
agent.registerManagedObject(MOCreator.createReadOnly(newOIDdata2, "NaN"));
agent.registerManagedObject(MOCreator.createReadOnly(newOIDdata3, "3"));
SNMPManager client1 = new SNMPManager("udp:127.21.1.103/2010");
client1.start();
client1.set(newOIDdata, "30");
//System.out.println(newOIDdata);
// Get back Value which is set
//System.out.println(client1.getAsString(newOIDdata));
while(true) {
}
}
}
Thanks for the help in advance.

Chronicle Queue V3. Can Entries be lost on data block roll-over?

I have an application that writes entries to a Chronicle Queue (V3) and also retains the excerpt entry index values in other (Chronicle)Maps, by way of providing indexed access into the queue. Sometimes we fail to find a given entry that we saved earlier, and I believe it may be related to data-block roll-over.
Below is a stand-alone test program that reproduces such use-cases at small scale. It repeatedly writes an entry and immediately attempts to look the resulting index value up using a separate ExcerptTailer. All is well for a while, until the first data block is used up and a second data file is assigned; then the retrieval failures start. If the data-block size is increased to avoid roll-overs, no entries are lost. Using a small index data-block size, causing multiple index files to be created, does not cause a problem either.
The test program also tries using an ExcerptListener running in parallel to see if the entries apparently 'lost' by the writer are ever received by the reader thread; they are not. It also tries to re-read the resulting queue from start to end, which confirms that they really are lost.
Stepping through the code, I see that when looking up a 'missing entry' within AbstractVanillaExcerpt#index, it appears to successfully locate the correct VanillaMappedBytes object from the dataCache, but determines that there is no entry at the data offset because the len == 0. In addition to the entries not being found, at some point after the problems start occurring post-roll-over, an NPE is thrown from within the VanillaMappedFile#fileChannel method because it has been passed a null File path. The code path assumes that when an entry has been looked up successfully in the index, a file will always be found, but it isn't in this case.
Is it possible to reliably use Chronicle Queue across data-block roll-overs, and if so, what am I doing that may be causing the problem I'm experiencing?
import java.io.IOException;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Set;
import org.junit.Before;
import org.junit.Test;
import net.openhft.affinity.AffinitySupport;
import net.openhft.chronicle.Chronicle;
import net.openhft.chronicle.ChronicleQueueBuilder;
import net.openhft.chronicle.ExcerptAppender;
import net.openhft.chronicle.ExcerptCommon;
import net.openhft.chronicle.ExcerptTailer;
import net.openhft.chronicle.VanillaChronicle;
public class ChronicleTests {
private static final int CQ_LEN = VanillaChronicle.Cycle.DAYS.length();
private static final long CQ_ENT = VanillaChronicle.Cycle.DAYS.entries();
private static final String ROOT_DIR = System.getProperty(ChronicleTests.class.getName() + ".ROOT_DIR",
"C:/Temp/chronicle/");
private static final String QDIR = System.getProperty(ChronicleTests.class.getName() + ".QDIR", "chronicleTests");
private static final int DATA_SIZE = Integer
.parseInt(System.getProperty(ChronicleTests.class.getName() + ".DATA_SIZE", "100000"));
// Chunk file size of CQ index
private static final int INDX_SIZE = Integer
.parseInt(System.getProperty(ChronicleTests.class.getName() + ".INDX_SIZE", "10000"));
private static final int Q_ENTRIES = Integer
.parseInt(System.getProperty(ChronicleTests.class.getName() + ".Q_ENTRIES", "5000"));
// Data type id
protected static final byte FSYNC_DATA = 1;
protected static final byte NORMAL_DATA = 0;
protected static final byte TH_START_DATA = -1;
protected static final byte TH_END_DATA = -2;
protected static final byte CQ_START_DATA = -3;
private static final long MAX_RUNTIME_MILLISECONDS = 30000;
private static String PAYLOAD_STRING = "1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
private static byte PAYLOAD_BYTES[] = PAYLOAD_STRING.getBytes();
private Chronicle _chronicle;
private String _cqPath = ROOT_DIR + QDIR;
@Before
public void init() {
buildCQ();
}
@Test
public void test() throws IOException, InterruptedException {
boolean passed = true;
Collection<Long> missingEntries = new LinkedList<Long>();
long sent = 0;
Thread listener = listen();
try {
listener.start();
// Write entries to CQ,
for (int i = 0; i < Q_ENTRIES; i++) {
long entry = writeQEntry(PAYLOAD_BYTES, (i % 100) == 0);
sent++;
// check each entry can be looked up
boolean found = checkEntry(i, entry);
if (!found)
missingEntries.add(entry);
passed &= found;
}
// Wait awhile for the listener
listener.join(MAX_RUNTIME_MILLISECONDS);
if (listener.isAlive())
listener.interrupt();
} finally {
if (listener.isAlive()) { // => exception raised so wait for listener
log("Give listener a chance....");
sleep(MAX_RUNTIME_MILLISECONDS);
listener.interrupt();
}
log("Sent: " + sent + " Received: " + _receivedEntries.size());
// Look for missing entries in receivedEntries
missingEntries.forEach(me -> checkMissingEntry(me));
log("All passed? " + passed);
// Try to find missing entries by searching from the start...
searchFromStartFor(missingEntries);
_chronicle.close();
_chronicle = null;
// Re-initialise CQ and look for missing entries again...
log("Re-initialise");
init();
searchFromStartFor(missingEntries);
}
}
private void buildCQ() {
try {
// build chronicle queue
_chronicle = ChronicleQueueBuilder.vanilla(_cqPath).cycleLength(CQ_LEN).entriesPerCycle(CQ_ENT)
.indexBlockSize(INDX_SIZE).dataBlockSize(DATA_SIZE).build();
} catch (IOException e) {
throw new InitializationException("Failed to initialize Active Trade Store.", e);
}
}
private long writeQEntry(byte dataArray[], boolean fsync) throws IOException {
ExcerptAppender appender = _chronicle.createAppender();
return writeData(appender, dataArray, fsync);
}
private boolean checkEntry(int seqNo, long entry) throws IOException {
ExcerptTailer tailer = _chronicle.createTailer();
if (!tailer.index(entry)) {
log("SeqNo: " + seqNo + " for entry + " + entry + " not found");
return false;
}
boolean isMarker = isMarker(tailer);
boolean isFsyncData = isFsyncData(tailer);
boolean isNormalData = isNormalData(tailer);
String type = isMarker ? "MARKER" : isFsyncData ? "FSYNC" : isNormalData ? "NORMALDATA" : "UNKNOWN";
log("Entry: " + entry + "(" + seqNo + ") is " + type);
return true;
}
private void log(String string) {
System.out.println(string);
}
private void searchFromStartFor(Collection<Long> missingEntries) throws IOException {
Set<Long> foundEntries = new HashSet<Long>(Q_ENTRIES);
ExcerptTailer tailer = _chronicle.createTailer();
tailer.toStart();
while (tailer.nextIndex())
foundEntries.add(tailer.index());
Iterator<Long> iter = missingEntries.iterator();
long foundCount = 0;
while (iter.hasNext()) {
long me = iter.next();
if (foundEntries.contains(me)) {
log("Found missing entry: " + me);
foundCount++;
}
}
log("searchFromStartFor Found: " + foundCount + " of: " + missingEntries.size() + " missing entries");
}
private void checkMissingEntry(long missingEntry) {
if (_receivedEntries.contains(missingEntry))
log("Received missing entry:" + missingEntry);
}
Set<Long> _receivedEntries = new HashSet<Long>(Q_ENTRIES);
private Thread listen() {
Thread returnVal = new Thread("Listener") {
public void run() {
try {
int receivedCount = 0;
ExcerptTailer tailer = _chronicle.createTailer();
tailer.toStart();
while (receivedCount < Q_ENTRIES) {
if (tailer.nextIndex()) {
_receivedEntries.add(tailer.index());
} else {
ChronicleTests.this.sleep(1);
}
}
log("listener complete");
} catch (IOException e) {
log("Interupted before receiving all entries");
}
}
};
return returnVal;
}
private void sleep(long interval) {
try {
Thread.sleep(interval);
} catch (InterruptedException e) {
// No action required
}
}
protected static final int THREAD_ID_LEN = Integer.SIZE / Byte.SIZE;
protected static final int DATA_TYPE_LEN = Byte.SIZE / Byte.SIZE;
protected static final int TIMESTAMP_LEN = Long.SIZE / Byte.SIZE;
protected static final int CRC_LEN = Long.SIZE / Byte.SIZE;
protected static long writeData(ExcerptAppender appender, byte dataArray[],
boolean fsync) {
appender.startExcerpt(DATA_TYPE_LEN + THREAD_ID_LEN + dataArray.length
+ CRC_LEN);
appender.nextSynchronous(fsync);
if (fsync) {
appender.writeByte(FSYNC_DATA);
} else {
appender.writeByte(NORMAL_DATA);
}
appender.writeInt(AffinitySupport.getThreadId());
appender.write(dataArray);
appender.writeLong(CRCCalculator.calcDataAreaCRC(appender));
appender.finish();
return appender.lastWrittenIndex();
}
protected static boolean isMarker(ExcerptCommon excerpt) {
if (isCqStartMarker(excerpt) || isStartMarker(excerpt) || isEndMarker(excerpt)) {
return true;
}
return false;
}
protected static boolean isCqStartMarker(ExcerptCommon excerpt) {
return isDataTypeMatched(excerpt, CQ_START_DATA);
}
protected static boolean isStartMarker(ExcerptCommon excerpt) {
return isDataTypeMatched(excerpt, TH_START_DATA);
}
protected static boolean isEndMarker(ExcerptCommon excerpt) {
return isDataTypeMatched(excerpt, TH_END_DATA);
}
protected static boolean isData(ExcerptTailer tailer, long index) {
if (!tailer.index(index)) {
return false;
}
return isData(tailer);
}
private static void movePosition(ExcerptCommon excerpt, long position) {
if (excerpt.position() != position)
excerpt.position(position);
}
private static void moveToFsyncFlagPos(ExcerptCommon excerpt) {
movePosition(excerpt, 0);
}
private static boolean isDataTypeMatched(ExcerptCommon excerpt, byte type) {
moveToFsyncFlagPos(excerpt);
byte b = excerpt.readByte();
if (b == type) {
return true;
}
return false;
}
protected static boolean isNormalData(ExcerptCommon excerpt) {
return isDataTypeMatched(excerpt, NORMAL_DATA);
}
protected static boolean isFsyncData(ExcerptCommon excerpt) {
return isDataTypeMatched(excerpt, FSYNC_DATA);
}
/**
* Check if this entry is Data
*
* @param excerpt
* @return true if the entry is data
*/
protected static boolean isData(ExcerptCommon excerpt) {
if (isNormalData(excerpt) || isFsyncData(excerpt)) {
return true;
}
return false;
}
}
The problem only occurs when initialising the data-block size with a value that is not a power of two. The built-in configurations on IndexedChronicleQueueBuilder (small(), medium(), large()) take care to initialise using powers of two, which provided the clue as to the appropriate usage.
Notwithstanding the above response regarding support, which I totally appreciate, it would be useful if a knowledgeable Chronicle user could confirm that the integrity of Chronicle Queue depends on using a data-block size that is a power of two.
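To make that concrete, the fix in the test above amounts to building the queue with a power-of-two data-block size. A minimal sketch, reusing the constants from the test (the 64 MB figure is only an example value, not taken from the original run):
Chronicle chronicle = ChronicleQueueBuilder.vanilla(_cqPath)
        .cycleLength(CQ_LEN)
        .entriesPerCycle(CQ_ENT)
        .indexBlockSize(INDX_SIZE)
        .dataBlockSize(64 * 1024 * 1024) // power of two, avoids the lost-entry symptom described above
        .build();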

How to display a clob data more than 66k in text field in oracle forms 12g

I don't know how to display CLOB data of more than 66K in Oracle Forms.
A text item will only take a LONG data type, and even then not more than 66K. I have CLOB data and want to display it.
The easiest way to display this is to make a Forms bean (PJC).
Then you can display it in a JTextPane, which is big enough.
You can then write a function that returns 32,000-character pieces of the CLOB and passes them to the bean; a small usage sketch follows the bean code below.
package be.axi.oracle.forms.jpc;
import java.awt.event.FocusEvent;
import java.awt.event.FocusListener;
import java.awt.event.KeyEvent;
import java.awt.event.KeyListener;
import javax.swing.JScrollPane;
import javax.swing.JTextPane;
import javax.swing.SwingUtilities;
import javax.swing.UIManager;
import javax.swing.text.BadLocationException;
import oracle.forms.handler.IHandler;
import oracle.forms.properties.ID;
import oracle.forms.ui.CustomEvent;
import oracle.forms.ui.VBean;
/**
* A TextArea to get more than 64k texts
* @author Francois Degrelle
* @version 1.1
*/
public class BigTextArea extends VBean implements FocusListener, KeyListener
{
private static final long serialVersionUID = 1L;
public final static ID ADDTEXT = ID.registerProperty("ADD_TEXT");
public final static ID VALUE = ID.registerProperty("VALUE");
public final static ID SHOW = ID.registerProperty("SHOW");
public final static ID CLEAR = ID.registerProperty("CLEAR");
public final static ID GETTEXT = ID.registerProperty("GET_TEXT");
public final static ID GETLENGTH = ID.registerProperty("GET_LENGTH");
public final static ID pLostFocus = ID.registerProperty("BEAN_QUITTED");
private IHandler m_handler;
private int iStart = 0 ;
private int iChunk = 8192 ;
private StringBuffer sb = new StringBuffer();
protected JTextPane jtp = new JTextPane();
public BigTextArea()
{
super();
try
{
UIManager.setLookAndFeel(UIManager.getSystemLookAndFeelClassName());
SwingUtilities.updateComponentTreeUI(this);
}
catch (Exception ex)
{
ex.printStackTrace();
}
JScrollPane ps = new JScrollPane(jtp);
ps.setBorder(null);
add(ps);
ps.setVisible(true);
}
public void init(IHandler handler)
{
m_handler = handler;
super.init(handler);
addFocusListener(this);
jtp.addFocusListener(this);
jtp.addKeyListener(this);
}
public boolean setProperty(ID property, Object value)
{
//
// add text to the TextArea
//
if (property == ADDTEXT)
{
sb.append(value.toString()) ;
printMemory();
return true;
}
//
// display the whole text
//
else if(property == SHOW)
{
jtp.setText(sb.toString());
jtp.setCaretPosition(0);
sb = new StringBuffer();
System.gc();
printMemory();
return true ;
}
//
// clear the TextArea
//
else if(property == CLEAR) {
jtp.setText("");
return true ;
}
else
{
return super.setProperty(property, value);
}
}
/*-----------------------------------*
* Get the result string from Forms *
*-----------------------------------*/
public Object getProperty(ID pId)
{
//
// returns the text length
//
if (pId == GETLENGTH)
{
return "" + jtp.getText().length();
}
//
// returns the chunks
//
else if (pId == GETTEXT) {
String s = "" ;
int iLen = jtp.getText().length() ;
while (iStart < iLen)
{
try{
if(iStart+iChunk <= iLen) s = jtp.getText(iStart,iChunk);
else s = jtp.getText(iStart,iLen-iStart);
iStart += iChunk ;
return s ;
}
catch (BadLocationException ble) { ble.printStackTrace(); return ""; }
}
iStart = 0 ;
return "" ;
}
else
{
return super.getProperty(pId);
}
} // getProperty()
/*--------------------------*
* handle the focus events *
*--------------------------*/
public void focusGained(FocusEvent e)
{
if (e.getComponent() == this)
{
// put the focus on the component
jtp.requestFocus();
}
try
{
m_handler.setProperty(FOCUS_EVENT, e);
}
catch ( Exception ex )
{
;
}
}
public void focusLost(FocusEvent e)
{
CustomEvent ce = new CustomEvent(m_handler, pLostFocus);
dispatchCustomEvent(ce);
}
/*--------------------------*
* Handle the Key listener *
*--------------------------*/
public void keyPressed(KeyEvent e)
{
/*
** Allows TAB key to exit the item
** and continue the standard Forms navigation
*/
if ( (e.getKeyCode() == KeyEvent.VK_TAB) )
{
try
{
m_handler.setProperty(KEY_EVENT, e);
}
catch ( Exception ex )
{
}
}
}
public void keyTyped(KeyEvent e)
{
}
public void keyReleased(KeyEvent e)
{
}
// utility to output the memory available
private void printMemory() {
System.out.println("Java memory in use = "
+ (Runtime.getRuntime().totalMemory()
- Runtime.getRuntime().freeMemory()));
}
}
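To illustrate how the pieces fit together, here is a small Java-only sketch of driving the bean's custom properties. In a real form the same two steps would be done from PL/SQL with SET_CUSTOM_PROPERTY, feeding the 32,000-character pieces fetched from the CLOB; the variable and helper names below are made up for the example:
BigTextArea bean = new BigTextArea();
String clobText = fetchClobTextSomehow(); // hypothetical helper returning the CLOB content as a String
int chunk = 32000;
for (int start = 0; start < clobText.length(); start += chunk) {
    String piece = clobText.substring(start, Math.min(start + chunk, clobText.length()));
    bean.setProperty(BigTextArea.ADDTEXT, piece); // accumulate each piece in the bean's internal buffer
}
bean.setProperty(BigTextArea.SHOW, ""); // push the accumulated text into the JTextPane in one go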

ClassCastException: org.apache.hadoop.io.LongWritable cannot be cast to org.apache.hadoop.hbase.io.ImmutableBytesWritable

I have exported a table from HBase to a file, in a format very similar to org.apache.hadoop.mapreduce.lib.output.TextOutputFormat. To import the exported text-format file, I have tweaked the open-source Import code to support importing text-based files instead of SequenceFiles.
job.setInputFormatClass(TextInputFormat.class);
While running the Import class I am getting the following exception.
java.lang.ClassCastException: org.apache.hadoop.io.LongWritable cannot be cast to org.apache.hadoop.hbase.io.ImmutableBytesWritable
at Import$Importer.map(Import.java:1)
at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:144)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:764)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:370)
at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:212)
Here is my Export class, which was tweaked to write the content to the file from the ExporterTable mapper.
public class Export
{
private static final Log LOG = LogFactory.getLog(Export.class);
final static String NAME = "export";
final static String RAW_SCAN = "hbase.mapreduce.include.deleted.rows";
private static OutputStream out;
private static final String utf8 = "UTF-8";
private static final byte[] newline;
private static final byte[] keyValueSeparator;
static {
try {
newline = "\n".getBytes(utf8);
keyValueSeparator = "\t".getBytes(utf8);
}
catch (UnsupportedEncodingException uee) {
throw new IllegalArgumentException("can't find " + utf8 + " encoding");
}
}
/**
* Mapper.
*/
static class ExporterTable extends TableMapper<ImmutableBytesWritable, Result>
{
/**
* @param row The current table row key.
* @param value The columns.
* @param context The current context.
* @throws IOException When something is broken with the data.
* @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN,
* org.apache.hadoop.mapreduce.Mapper.Context)
*/
@Override
public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException {
try {
context.write(row, value);
write(row, value);
System.out.println(row);
System.out.println(value);
}
catch (InterruptedException e) {
e.printStackTrace();
}
}
}
/**
* Sets up the actual job.
*
* @param conf The current configuration.
* @param args The command line parameters.
* @return The newly created job.
* @throws IOException When setting up the job fails.
*/
public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException {
String tableName = args[0];
// this.out = new DataOutputStream(fos);
Path outputDir = new Path(args[1]);
Job job = new Job(conf, NAME + "_" + tableName);
job.setJobName(NAME + "_" + tableName);
job.setJarByClass(ExporterTable.class);
// Set optional scan parameters
Scan s = getConfiguredScanForJob(conf, args);
TableMapReduceUtil.initTableMapperJob(tableName, s, ExporterTable.class, ImmutableBytesWritable.class, IntWritable.class, job);
// No reducers. Just write straight to output files.
job.setNumReduceTasks(0);
job.setOutputValueClass(Text.class);
// FileOutputFormat.setOutputPath(job, outputDir);
job.setOutputFormatClass(NullOutputFormat.class);
TableMapReduceUtil.addHBaseDependencyJars(conf);
TableMapReduceUtil.addDependencyJars(conf, JsonProcessingException.class);
TableMapReduceUtil.addDependencyJars(job);
return job;
}
private static Scan getConfiguredScanForJob(Configuration conf, String[] args) throws IOException {
Scan s = new Scan();
// Optional arguments.
// Set Scan Versions
int versions = args.length > 2 ? Integer.parseInt(args[2]) : 1;
s.setMaxVersions(versions);
// Set Scan Range
long startTime = args.length > 3 ? Long.parseLong(args[3]) : 0L;
long endTime = args.length > 4 ? Long.parseLong(args[4]) : Long.MAX_VALUE;
s.setTimeRange(startTime, endTime);
// Set cache blocks
s.setCacheBlocks(false);
// Set Scan Column Family
boolean raw = Boolean.parseBoolean(conf.get(RAW_SCAN));
if (raw) {
s.setRaw(raw);
}
if (conf.get(TableInputFormat.SCAN_COLUMN_FAMILY) != null) {
s.addFamily(Bytes.toBytes(conf.get(TableInputFormat.SCAN_COLUMN_FAMILY)));
}
// Set RowFilter or Prefix Filter if applicable.
Filter exportFilter = getExportFilter(args);
if (exportFilter != null) {
LOG.info("Setting Scan Filter for Export.");
s.setFilter(exportFilter);
}
LOG.info("versions=" + versions + ", starttime=" + startTime + ", endtime=" + endTime + ", keepDeletedCells=" + raw);
return s;
}
private static Filter getExportFilter(String[] args) {
Filter exportFilter = null;
String filterCriteria = (args.length > 5) ? args[5] : null;
if (filterCriteria == null)
return null;
if (filterCriteria.startsWith("^")) {
String regexPattern = filterCriteria.substring(1, filterCriteria.length());
exportFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator(regexPattern));
}
else {
exportFilter = new PrefixFilter(Bytes.toBytes(filterCriteria));
}
return exportFilter;
}
/*
* @param errorMsg Error message. Can be null.
*/
private static void usage(final String errorMsg) {
if (errorMsg != null && errorMsg.length() > 0) {
System.err.println("ERROR: " + errorMsg);
}
System.err.println("Usage: Export [-D <property=value>]* <tablename> <outputdir> [<versions> " + "[<starttime> [<endtime>]] [^[regex pattern] or [Prefix] to filter]]\n");
System.err.println(" Note: -D properties will be applied to the conf used. ");
System.err.println(" For example: ");
System.err.println(" -D mapred.output.compress=true");
System.err.println(" -D mapred.output.compression.codec=org.apache.hadoop.io.compress.GzipCodec");
System.err.println(" -D mapred.output.compression.type=BLOCK");
System.err.println(" Additionally, the following SCAN properties can be specified");
System.err.println(" to control/limit what is exported..");
System.err.println(" -D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=<familyName>");
System.err.println(" -D " + RAW_SCAN + "=true");
System.err.println("For performance consider the following properties:\n" + " -Dhbase.client.scanner.caching=100\n" + " -Dmapred.map.tasks.speculative.execution=false\n" + " -Dmapred.reduce.tasks.speculative.execution=false");
}
/**
* Main entry point.
*
* @param args The command line parameters.
* @throws Exception When running the job fails.
*/
public static void main(String[] args) throws Exception {
Configuration conf = HBaseConfiguration.create();
conf.set("mapreduce.framework.name", "local");
String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
if (otherArgs.length < 2) {
usage("Wrong number of arguments: " + otherArgs.length);
System.exit(-1);
}
boolean jobStatus = false;
Job job = createSubmittableJob(conf, otherArgs);
try {
File f = new File("Test");
out = new FileOutputStream(f);
jobStatus = job.waitForCompletion(true);
}
catch (Exception e) {
e.printStackTrace();
}
finally {
IOUtils.closeStream(out);
}
// convertTextToSequence(conf);
System.exit(jobStatus ? 0 : 1);
}
public static void write(ImmutableBytesWritable key, Result value) throws IOException {
boolean nullKey = key == null;
boolean nullValue = value == null;
if (nullKey && nullValue) {
return;
}
if (!nullKey) {
writeObject(key);
}
if (!(nullKey || nullValue)) {
out.write(keyValueSeparator);
}
if (!nullValue) {
writeObject(value);
}
out.write(newline);
}
/**
* Write the object to the byte stream, handling Text as a special
* case.
* @param o the object to print
* @throws IOException if the write throws, we pass it on
*/
private static void writeObject(Object o) throws IOException {
if (o instanceof Text) {
Text to = (Text) o;
out.write(to.getBytes(), 0, to.getLength());
}
else {
out.write(o.toString().getBytes(utf8));
}
}
}
Any help is appreciated.
You have declared the map method as follows, writing the output key as ImmutableBytesWritable:
public void map(ImmutableBytesWritable row, Result value, Context context)
throws IOException {
try {
context.write(row, value);
You have to set the following job parameters so that the map output key and value classes are declared correctly:
job.setMapOutputKeyClass(ImmutableBytesWritable.class);
job.setMapOutputValueClass(Result.class);
Have a look at a working example: 7. Export an HBase table to File.
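For context, a minimal sketch of where those two lines sit in the job setup of the tweaked Import (the class names and surrounding code are assumptions, since that part of the code is not shown in the question):
Job job = new Job(conf, "import_" + tableName);
job.setJarByClass(Import.class);
job.setInputFormatClass(TextInputFormat.class);
job.setMapperClass(Importer.class); // the mapper from the stack trace (Import$Importer)
job.setMapOutputKeyClass(ImmutableBytesWritable.class);
job.setMapOutputValueClass(Result.class);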

Upload File data into database using spring mvc and hibernate

I need to upload data into two database tables (Factory and FactoryType) using the form below, but it isn't working. Can somebody take a look?
Factory table: factoryId, factoryname
FactoryType table: factoryType, factoryTypeId
FactoryConf table: factoryId, factoryTypeId
We are using Hibernate for the database operations.
Model:
@Entity
@Table(name = "FactoryConf", uniqueConstraints = {
@UniqueConstraint(columnNames = { "factoryId" } )
})
public class FactoryConf {
@Id
long factoryId;
@OneToOne
@JoinColumn(name = "factoryId", insertable = false, updatable = false)
Factory factory;
@ManyToOne(optional = false)
@JoinColumn(name = "factoryTypeId")
FactoryType factoryType;
public FactoryConf() {
super();
}
public FactoryConf(long factoryId, FactoryType factoryType) {
super();
this.factoryType = factoryType;
this.factoryId = factoryId;
}
public Factory getFactory() {
return factory;
}
public void setFactory(Factory factory) {
this.factory = factory;
}
public FactoryType getFactoryType() {
return factoryType;
}
public void setFactoryType(FactoryType factoryType) {
this.factoryType = factoryType;
}
public long getFactoryId() {
return factoryId;
}
public void setFactoryId(long factoryId) {
this.factoryId = factoryId;
}
public FactoryType getFactoryTypeByFactoryID(long factoryId){
return factoryType;
}
}
Bean class:
/**
* This bean is defined to parse each record from the CSV file.
* All records are mapped to instances of this bean class.
*
*/
public class FactoryCSVFileInputBean {
private String upload_id;
private String file_name;
private byte file_data;
private long Id;
private String Name;
private String Type;
//getter setters
}
CSV parsing:
/**
* This class is defined for following actions
* 1. Validate the input CSV file format, header columns.
* 2. Parse the CSV file into a list of beans.
* 3. Validate input records with missing data and prepare a valid factory list to be processed. *
*/
public class FactoryCSVUtil {
private static Log log = LogFactory.getLog(FactoryCSVUtil.class);
private static final List<String> fileHeaderFields = new ArrayList<String>();
private static final String UTF8CHARSET = "UTF-8";
static{
for (Field f : FactoryCSVFileInputBean.class.getDeclaredFields()) {
fileHeaderFields.add(f.getName());
}
}
public static List<FactoryCSVFileInputBean> getCSVInputList(InputStream inputStream){
CSVReader reader = null;
List<FactoryCSVFileInputBean> csvList = null;
FactoryCSVFileInputBean inputRecord = null;
String[] header = null;
String[] row = null;
try {
reader = new CSVReader(new InputStreamReader(inputStream,UTF8CHARSET));
csvList = new ArrayList<FactoryCSVFileInputBean>();
header = reader.readNext();
boolean isEmptyLine = true;
while ((row = reader.readNext()) != null) {
isEmptyLine = true;
if(!(row.length==1 && StringUtils.isBlank(row[0]))){//not an empty line, not even containing ','
inputRecord = new FactoryCSVFileInputBean();
isEmptyLine = populateFields(inputRecord, header, row);
if(row.length != header.length)
//inputRecord.setUploadStatus("Not Loaded - Missing or invalid Data");
if(!isEmptyLine)
csvList.add(inputRecord);
}
}
} catch (IOException e) {
log.debug("IOException while accessing FactoryCSVFileInputBean: " + e);
return null;
} catch (IllegalAccessException e) {
log.debug("IllegalAccessException while accessing FactoryCSVFileInputBean: " + e);
return null;
} catch (InvocationTargetException e) {
log.debug("InvocationTargetException while copying FactoryCSVFileInputBean properties: " + e);
return null;
} catch (Exception e) {
log.debug("Exception while parsing CSV file: " + e);
return null;
}finally{
try{
if(reader!=null)
reader.close();
}catch(IOException ioe){}
}
return csvList;
}
protected static boolean populateFields(FactoryCSVFileInputBean inputRecord,String[] header, String[] row) throws IllegalAccessException, InvocationTargetException {
boolean isEmptyLine = true;
for (int i = 0; i < row.length; i++) {
String val = row[i];
if(!StringUtils.isBlank(val)){
BeanUtilsBean.getInstance().copyProperty(inputRecord, header[i], val);
isEmptyLine = false;
} else {
//inputRecord.setUploadStatus(String.format("Not Loaded - Missing or invalid Data for:%s",header[i]));
}
}
return isEmptyLine;
}
public static void validateInputFile(CommonsMultipartFile csvFile, Model model){
InputStream inputStream = null;
CSVReader reader = null;
String fileName = csvFile.getOriginalFilename();
String fileExtension = fileName.substring(fileName.lastIndexOf('.') + 1);
if(fileExtension.toUpperCase().equals("CSV")){
try{
inputStream = csvFile.getInputStream();
reader = new CSVReader(new InputStreamReader(inputStream,UTF8CHARSET));
String[] header = reader.readNext();
if(header!=null){
for (int i = 0; i < header.length; i++) {
if(!header[i].equals("") && !fileHeaderFields.contains(header[i])){
log.debug("Invalid Column found in upload file: " + header[i]);
model.addAttribute("failureMsg", "Invalid Column found in upload file: " + header[i]);
break;
}
}
for(csvHeaderFieldsEnum field : csvHeaderFieldsEnum.values()){
if(!Arrays.asList(header).contains(field.getValue())){
log.debug("Missing column in upload file: " + field.getValue());
model.addAttribute("failureMsg", "Missing column in upload file: " + field.getValue());
break;
}
}
}else{
model.addAttribute("failureMsg", "File is Empty - Please select a valid file");
}
String[] data = reader.readNext();
if(data==null){
log.debug("Empty file with header - No data found");
model.addAttribute("failureMsg", "Empty file with header - No data found");
}
}catch(IOException e){
log.debug("IOException in reading the CSV file: " + e);
model.addAttribute("failureMsg", "Exception in reading the CSV file");
}finally{
if(reader!=null)
try{
reader.close();
}catch(IOException e){ log.debug("IOException in closing reader of CSV file: " + e);}
}
}
else{
model.addAttribute("failureMsg", "Invalid file format - Please select a CSV file");
}
}
}
Model
public class FactoryUploadForm {
private CommonsMultipartFile fileData;
private String uploadComment;
/**
* @return the fileData
*/
public CommonsMultipartFile getFileData() {
return fileData;
}
/**
* @param fileData the fileData to set
*/
public void setFileData(CommonsMultipartFile fileData) {
this.fileData = fileData;
}
/**
* @return the uploadComment
*/
public String getUploadComment() {
return uploadComment;
}
/**
* @param uploadComment the uploadComment to set
*/
public void setUploadComment(String uploadComment) {
this.uploadComment = uploadComment;
}
public String toString(){
return " CSVFileName: " + getFileData().getOriginalFilename() + "; Upload Comment: " + uploadComment;
}
}
Controller
@Controller
public class FactoryUploadDownloadController {
private static final Log logger = LogFactory.getLog(FactoryUploadDownloadController.class);
@Resource
Service Service;
@Resource
FactoryUploadRepository repository;
@RequestMapping(value = "/submitUploadFactoryForm")
public String uploadFactory(FactoryUploadForm uploadform,
HttpServletRequest request, Model model, BindingResult result) {
logger.debug("====================================================================");
List<FactoryCSVFileInputBean> csvList = null;
List<FactoryType> factoryTypes = Service.getFactoryTypes();
try {
CommonsMultipartFile file = uploadform.getFileData();
// parse csv file to list
csvList = FactoryCSVUtil.getCSVInputList(file.getInputStream());
if (csvList == null) {
model.addAttribute("failureMsg","Error in file parsing - Please verify the file");
logger.debug("---------------------------------------------------------");
return "sucess";
}
} catch (Exception e) {
logger.debug("sorry this isn't working for you");
}
try {
CommonsMultipartFile file = uploadform.getFileData();
for (FactoryCSVFileInputBean inputRecord : csvList) {
Factory factoryentity = new Factory();
factoryentity.setId(inputRecord.getId());
factoryentity.setName(inputRecord.getName());
factoryentity = this.Service.saveFactory(factoryentity);
FactoryConf factoryconf = new FactoryConf();
factoryconf.setFactory(factoryentity);
factoryconf.setFactoryType(pickFactoryType(factoryTypes,inputRecord.getType()));
model.addAttribute("factoryconf", factoryconf);
this.Service.savefactoryCfg(factoryconf);
}
} catch (Exception e) {
logger.debug("sorry this isnt working for you");
}
return "success";
}
private FactoryType pickFactoryType(List<FactoryType> types, String typeName) {
for (FactoryType type : types) {
if (type.getFactoryType().equalsIgnoreCase(typeName))
return type;
}
throw new RuntimeException(String.format("Factory Type Invalid :%s", typeName));
}
}
From your question, I understand that you are not able to parse the data from the CSV file. Here is sample code for a similar task; I think it should help.
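Something along these lines, using the same opencsv CSVReader and BeanUtilsBean calls your code already uses (the method name and the bean class are just placeholders for illustration):
private List<FactoryCSVFileInputBean> parse(InputStream in) throws Exception {
    CSVReader reader = new CSVReader(new InputStreamReader(in, "UTF-8"));
    String[] header = reader.readNext(); // first row holds the column names
    List<FactoryCSVFileInputBean> records = new ArrayList<FactoryCSVFileInputBean>();
    String[] row;
    while ((row = reader.readNext()) != null) {
        FactoryCSVFileInputBean bean = new FactoryCSVFileInputBean();
        for (int i = 0; i < row.length && i < header.length; i++) {
            BeanUtilsBean.getInstance().copyProperty(bean, header[i], row[i]); // column name -> bean property
        }
        records.add(bean);
    }
    reader.close();
    return records;
}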
