Hadoop Data Set Map Reduce DataJoin - hadoop

Code
I tried to run the DataJoin example from the Hadoop in Action book.
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
// import org.apache.commons.logging.Log;
// import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.contrib.utils.join.*;
public class MultiDataSetJoinMR extends Configured implements Tool {

    public static class MapClass extends DataJoinMapperBase {
        protected Text generateInputTag(String inputFile) {
            String datasource = inputFile.split("-")[0];
            return new Text(datasource);
        }

        protected Text generateGroupKey(TaggedMapOutput aRecord) {
            String line = ((Text) aRecord.getData()).toString();
            String[] tokens = line.split(",");
            String groupKey = tokens[0];
            return new Text(groupKey);
        }

        protected TaggedMapOutput generateTaggedMapOutput(Object value) {
            TaggedWritable retv = new TaggedWritable((Text) value);
            retv.setTag(this.inputTag);
            return retv;
        }
    }

    public static class Reduce extends DataJoinReducerBase {
        protected TaggedMapOutput combine(Object[] tags, Object[] values) {
            if (tags.length < 2) return null;
            String joinedStr = "";
            for (int i = 0; i < values.length; i++) {
                if (i > 0) joinedStr += ",";
                TaggedWritable tw = (TaggedWritable) values[i];
                String line = ((Text) tw.getData()).toString();
                String[] tokens = line.split(",", 2);
                joinedStr += tokens[1];
            }
            TaggedWritable retv = new TaggedWritable(new Text(joinedStr));
            retv.setTag((Text) tags[0]);
            return retv;
        }
    }

    public static class TaggedWritable extends TaggedMapOutput {
        private Writable data;

        public TaggedWritable() {
            this.tag = new Text();
        }

        public TaggedWritable(Writable data) {
            this.tag = new Text("");
            this.data = data;
        }

        public Writable getData() {
            return data;
        }

        public void write(DataOutput out) throws IOException {
            this.tag.write(out);
            this.data.write(out);
        }

        public void readFields(DataInput in) throws IOException {
            this.tag.readFields(in);
            this.data.readFields(in);
        }
    }

    public int run(String[] args) throws Exception {
        Configuration conf = getConf();
        JobConf job = new JobConf(conf, MultiDataSetJoinMR.class);
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: MultiDataSetJoinMR <in> <out>");
            System.exit(2);
        }

        Path in = new Path(otherArgs[0]);
        Path out = new Path(otherArgs[1]);
        FileInputFormat.setInputPaths(job, in);
        FileOutputFormat.setOutputPath(job, out);

        job.setJobName("DataJoin");
        job.setMapperClass(MapClass.class);
        job.setReducerClass(Reduce.class);
        job.setInputFormat(TextInputFormat.class);
        job.setOutputFormat(TextOutputFormat.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(TaggedWritable.class);
        job.set("mapred.textoutputformat.separator", ",");

        JobClient.runJob(job);
        return 0;
    }

    public static void main(String[] args) throws Exception {
        int res = ToolRunner.run(new Configuration(),
                                 new MultiDataSetJoinMR(),
                                 args);
        System.exit(res);
    }
}
Running Command
./hadoop jar MultiDataSetJoin.jar /home/project/dataset /home/project/out
Error
But I am facing the following issue.
15 Mar, 2013 4:29:45 PM org.apache.hadoop.metrics.jvm.JvmMetrics init
INFO: Initializing JVM Metrics with processName=JobTracker, sessionId=
15 Mar, 2013 4:29:45 PM org.apache.hadoop.mapred.JobClient configureCommandLineOptions
WARNING: No job jar file set. User classes may not be found. See JobConf(Class) or JobConf#setJar(String).
15 Mar, 2013 4:29:45 PM org.apache.hadoop.mapred.FileInputFormat listStatus
INFO: Total input paths to process : 2
15 Mar, 2013 4:29:45 PM org.apache.hadoop.mapred.JobClient monitorAndPrintJob
INFO: Running job: job_local_0001
15 Mar, 2013 4:29:45 PM org.apache.hadoop.mapred.FileInputFormat listStatus
INFO: Total input paths to process : 2
15 Mar, 2013 4:29:45 PM org.apache.hadoop.mapred.MapTask runOldMapper
INFO: numReduceTasks: 1
15 Mar, 2013 4:29:45 PM org.apache.hadoop.mapred.MapTask$MapOutputBuffer <init>
INFO: io.sort.mb = 100
15 Mar, 2013 4:29:45 PM org.apache.hadoop.mapred.MapTask$MapOutputBuffer <init>
INFO: data buffer = 79691776/99614720
15 Mar, 2013 4:29:45 PM org.apache.hadoop.mapred.MapTask$MapOutputBuffer <init>
INFO: record buffer = 262144/327680
15 Mar, 2013 4:29:45 PM org.apache.hadoop.mapred.LocalJobRunner$Job run
WARNING: job_local_0001
java.lang.RuntimeException: Error in configuring object
at org.apache.hadoop.util.ReflectionUtils.setJobConf(ReflectionUtils.java:93)
at org.apache.hadoop.util.ReflectionUtils.setConf(ReflectionUtils.java:64)
at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:117)
at org.apache.hadoop.mapred.MapTask.runOldMapper(MapTask.java:354)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:307)
at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:177)
Caused by: java.lang.reflect.InvocationTargetException
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:616)
at org.apache.hadoop.util.ReflectionUtils.setJobConf(ReflectionUtils.java:88)
... 5 more
Caused by: java.lang.RuntimeException: Error in configuring object
at org.apache.hadoop.util.ReflectionUtils.setJobConf(ReflectionUtils.java:93)
at org.apache.hadoop.util.ReflectionUtils.setConf(ReflectionUtils.java:64)
at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:117)
at org.apache.hadoop.mapred.MapRunner.configure(MapRunner.java:34)
... 10 more
Caused by: java.lang.reflect.InvocationTargetException
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:616)
at org.apache.hadoop.util.ReflectionUtils.setJobConf(ReflectionUtils.java:88)
... 13 more
Caused by: java.lang.NullPointerException
at MultiDataSetJoinMR$MapClass.generateInputTag(MultiDataSetJoinMR.java:31)
at org.apache.hadoop.contrib.utils.join.DataJoinMapperBase.configure(DataJoinMapperBase.java:60)
... 18 more
null15 Mar, 2013 4:29:46 PM org.apache.hadoop.mapred.JobClient monitorAndPrintJob
INFO: map 0% reduce 0%
15 Mar, 2013 4:29:46 PM org.apache.hadoop.mapred.JobClient monitorAndPrintJob
INFO: Job complete: job_local_0001
15 Mar, 2013 4:29:46 PM org.apache.hadoop.mapred.Counters log
INFO: Counters: 0
Exception in thread "main" java.io.IOException: Job failed!
at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:1252)
at MultiDataSetJoinMR.run(MultiDataSetJoinMR.java:123)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:65)
at MultiDataSetJoinMR.main(MultiDataSetJoinMR.java:128)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:616)
at com.intellij.rt.execution.application.AppMain.main(AppMain.java:120)
From the log trace I can see that the inputFile variable is null in the method below:
protected Text generateInputTag(String inputFile)
{
    String datasource = inputFile.split("-")[0];
    return new Text(datasource);
}
I don't know where it gets called from or how to fix it. Can anyone help me, please?
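For reference, the stack trace pinpoints the caller: DataJoinMapperBase.configure() derives the input tag from the map.input.file job property before any records are mapped. A paraphrased sketch of that contrib method (reconstructed from the trace, so treat the details as approximate):

    // org.apache.hadoop.contrib.utils.join.DataJoinMapperBase (approximate)
    public void configure(JobConf job) {
        super.configure(job);
        this.job = job;
        // "map.input.file" holds the path of the current input split; in some
        // local/IDE runs it is not set yet, so inputFile arrives here as null.
        this.inputFile = job.get("map.input.file");
        this.inputTag = generateInputTag(this.inputFile); // NPE if inputFile is null
    }

So generateInputTag(null) is invoked during mapper setup, and inputFile.split("-") then throws the NullPointerException shown above.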

Related

Null pointer exception in hadoop reducer

I am facing a NullPointerException with the below code. It would be great if someone could review it and help me with the program.
The mapper runs fine, but I get an NPE when I try to split the value at the iterator. Please help me figure out my mistake. I have attached the mapper output below.
Toppermain.java
package TopperPackage;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class TopperMain {
    // hadoop jar TopperMain.jar args[0] args[1]
    public static void main(String[] args) throws Exception {
        Job myhadoopJob = new Job();
        myhadoopJob.setJarByClass(TopperMain.class);
        myhadoopJob.setJobName("Finding topper based on subject");

        FileInputFormat.addInputPath(myhadoopJob, new Path(args[0]));
        FileOutputFormat.setOutputPath(myhadoopJob, new Path(args[1]));
        myhadoopJob.setInputFormatClass(TextInputFormat.class);
        myhadoopJob.setOutputFormatClass(TextOutputFormat.class);
        myhadoopJob.setMapperClass(TopperMapper.class);
        myhadoopJob.setReducerClass(TopperReduce.class);
        myhadoopJob.setMapOutputKeyClass(Text.class);
        myhadoopJob.setMapOutputValueClass(Text.class);
        myhadoopJob.setOutputKeyClass(Text.class);
        myhadoopJob.setOutputValueClass(Text.class);

        System.exit(myhadoopJob.waitForCompletion(true) ? 0 : 1);
    }
}
TopperMapper.java
package TopperPackage;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

/* Sample input:
Surender,87,60,50,50,80
Raj,80,70,80,85,60
Anten,81,60,50,70,100
Dinesh,60,90,80,80,70
Priya,80,85,91,60,75
*/
public class TopperMapper extends Mapper<LongWritable, Text, Text, Text> {
    String temp, temp2;

    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String record = value.toString();
        String[] parts = record.split(",");

        temp = parts[0];
        temp2 = temp + "\t" + parts[1];
        context.write(new Text("Tamil"), new Text(temp2));
        temp2 = temp + "\t" + parts[2];
        context.write(new Text("English"), new Text(temp2));
        temp2 = temp + "\t" + parts[3];
        context.write(new Text("Maths"), new Text(temp2));
        temp2 = temp + "\t" + parts[4];
        context.write(new Text("Science"), new Text(temp2));
        temp2 = temp + "\t" + parts[5];
        context.write(new Text("SocialScrience"), new Text(temp2));
    }
}
TopperReduce.java
package TopperPackage;

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class TopperReduce extends Reducer<Text, Text, Text, Text> {
    int temp;
    private String[] names;
    private int[] marks;

    public void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        String top = "";
        int count = 0, topmark;
        marks = null;
        String befsplit;
        String[] parts = null;
        names = null;

        for (Text t : values) {
            befsplit = t.toString();
            parts = befsplit.split("\t");
            names[count] = parts[0];
            marks[count] = Integer.parseInt(parts[1]);
            count = count + 1;
        }

        topmark = calcTopper(marks);
        top = names[topmark] + "\t" + marks[topmark];
        context.write(new Text(key), new Text(top));
    }

    public int calcTopper(int[] marks) {
        int count = marks.length;
        temp = ((marks[1]));
        int i = 0;
        for (i = 1; i <= (count - 2); i++) {
            if (temp < marks[i + 1]) {
                temp = marks[i + 1];
            }
        }
        return i;
    }
}
The error is:
cloudera@cloudera-vm:~/Jarfiles$ hadoop jar TopperMain.jar /user/cloudera/inputfiles/topper/topperinput.txt /user/cloudera/outputfiles/topper/
14/08/24 23:17:07 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
14/08/24 23:17:08 INFO input.FileInputFormat: Total input paths to process : 1
14/08/24 23:17:09 INFO mapred.JobClient: Running job: job_201408241907_0012
14/08/24 23:17:10 INFO mapred.JobClient: map 0% reduce 0%
14/08/24 23:17:49 INFO mapred.JobClient: map 100% reduce 0%
14/08/24 23:18:03 INFO mapred.JobClient: Task Id : attempt_201408241907_0012_r_000000_0, Status : FAILED
java.lang.NullPointerException
at TopperPackage.TopperReduce.reduce(TopperReduce.java:25)
at TopperPackage.TopperReduce.reduce(TopperReduce.java:1)
at org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:176)
at org.apache.hadoop.mapred.ReduceTask.runNewReducer(ReduceTask.java:571)
at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:413)
at org.apache.hadoop.mapred.Child$4.run(Child.java:268)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1115)
at org.apache.hadoop.mapred.Child.main(Child.java:262)
attempt_201408241907_0012_r_000000_0: log4j:WARN No appenders could be found for logger (org.apache.hadoop.hdfs.DFSClient).
attempt_201408241907_0012_r_000000_0: log4j:WARN Please initialize the log4j system properly.
14/08/24 23:18:22 INFO mapred.JobClient: Task Id : attempt_201408241907_0012_r_000000_1, Status : FAILED
java.lang.NullPointerException
at TopperPackage.TopperReduce.reduce(TopperReduce.java:25)
at TopperPackage.TopperReduce.reduce(TopperReduce.java:1)
at org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:176)
at org.apache.hadoop.mapred.ReduceTask.runNewReducer(ReduceTask.java:571)
at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:413)
at org.apache.hadoop.mapred.Child$4.run(Child.java:268)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:396)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1115)
at org.apache.hadoop.mapred.Child.main(Child.java:262)
attempt_201408241907_0012_r_000000_1: log4j:WARN No appenders could be found for logger (org.apache.hadoop.hdfs.DFSClient).
attempt_201408241907_0012_r_000000_1: log4j:WARN Please initialize the log4j system properly.
I am getting the expected output from the mapper, but the reducer throws an error when splitting the output and storing it in a variable.
The mapper output is:
Tamil Surender 87
English Surender 60
Maths Surender 50
Science Surender 50
SocialScrience Surender 80
Tamil Raj 80
English Raj 70
Maths Raj 80
Science Raj 85
SocialScrience Raj 60
Tamil Anten 81
English Anten 60
Maths Anten 50
Science Anten 70
SocialScrience Anten 100
Tamil Dinesh 60
English Dinesh 90
Maths Dinesh 80
Science Dinesh 80
SocialScrience Dinesh 70
Tamil Priya 80
English Priya 85
Maths Priya 91
Science Priya 60
SocialScrience Priya 75
Any advice pointing out my mistake is appreciated.
The error is due to your initializing the marks and names arrays to null and never actually allocating them. Please use the reducer class below; note that calcTopper must also remember the highest mark seen so far, not just its index.
import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class TopperReduce extends Reducer<Text, Text, Text, Text> {
    private String[] names = new String[10];
    private int[] marks = new int[10];

    public void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        String top = "";
        int count = 0, topmark;
        String befsplit;
        String[] parts = null;

        for (Text t : values) {
            befsplit = t.toString();
            parts = befsplit.split("\t");
            names[count] = parts[0];
            marks[count] = Integer.parseInt(parts[1]);
            count++;
        }

        topmark = calcTopper(marks);
        top = names[topmark] + "\t" + marks[topmark];
        context.write(new Text(key), new Text(top));
    }

    public int calcTopper(int[] marks) {
        int highestMark = 0;
        int highestMarkIndex = 0;
        for (int i = 0; i < marks.length; i++) {
            // remember both the best mark and where it was seen
            if (marks[i] > highestMark) {
                highestMark = marks[i];
                highestMarkIndex = i;
            }
        }
        return highestMarkIndex;
    }
}
You are referring to the null array variables names and marks, so you are getting this error. Change your code as shown below and it should work:
public class TopperReduce extends Reducer<Text, Text, Text, Text> {
    int temp;
    private String[] names = new String[20];
    private int[] marks = new int[20];

    public void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        String top = "";
        int count = 0, topmark;

        for (Text t : values) {
            String befsplit = t.toString();
            String[] parts = befsplit.split("\t");
            names[count] = parts[0];
            marks[count] = Integer.parseInt(parts[1]);
            count = count + 1;
        }

        topmark = calcTopper(marks);
        top = names[topmark] + "\t" + marks[topmark];
        context.write(new Text(key), new Text(top));
    }

Sample code for using hadoop mapreduce against cassandra

I have been trying to get the MapReduce sample code that comes with Cassandra running, but I get a runtime error.
Source code:
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.Map.Entry;
import org.apache.cassandra.hadoop.cql3.CqlConfigHelper;
import org.apache.cassandra.hadoop.cql3.CqlOutputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.hadoop.cql3.CqlPagingInputFormat;
import org.apache.cassandra.hadoop.ConfigHelper;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import java.nio.charset.CharacterCodingException;
/**
* This counts the occurrences of words in ColumnFamily
* cql3_worldcount ( user_id text,
* category_id text,
* sub_category_id text,
* title text,
* body text,
* PRIMARY KEY (user_id, category_id, sub_category_id))
*
* For each word, we output the total number of occurrences across all body texts.
*
* When outputting to Cassandra, we write the word counts to column family
* output_words ( row_id1 text,
* row_id2 text,
* word text,
* count_num text,
* PRIMARY KEY ((row_id1, row_id2), word))
* as a {word, count} to columns: word, count_num with a row key of "word sum"
*/
public class WordCount extends Configured implements Tool
{
    private static final Logger logger = LoggerFactory.getLogger(WordCount.class);

    static final String KEYSPACE = "cql3_worldcount";
    static final String COLUMN_FAMILY = "inputs";
    static final String OUTPUT_REDUCER_VAR = "output_reducer";
    static final String OUTPUT_COLUMN_FAMILY = "output_words";
    private static final String OUTPUT_PATH_PREFIX = "/tmp/word_count";
    private static final String PRIMARY_KEY = "row_key";

    public static void main(String[] args) throws Exception
    {
        // Let ToolRunner handle generic command-line options
        ToolRunner.run(new Configuration(), new WordCount(), args);
        System.exit(0);
    }

    public static class TokenizerMapper extends Mapper<Map<String, ByteBuffer>, Map<String, ByteBuffer>, Text, IntWritable>
    {
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();
        private ByteBuffer sourceColumn;

        protected void setup(org.apache.hadoop.mapreduce.Mapper.Context context)
                throws IOException, InterruptedException
        {
        }

        public void map(Map<String, ByteBuffer> keys, Map<String, ByteBuffer> columns, Context context) throws IOException, InterruptedException
        {
            for (Entry<String, ByteBuffer> column : columns.entrySet())
            {
                if (!"body".equalsIgnoreCase(column.getKey()))
                    continue;

                String value = ByteBufferUtil.string(column.getValue());
                logger.debug("read {}:{}={} from {}",
                             new Object[] {toString(keys), column.getKey(), value, context.getInputSplit()});

                StringTokenizer itr = new StringTokenizer(value);
                while (itr.hasMoreTokens())
                {
                    word.set(itr.nextToken());
                    context.write(word, one);
                }
            }
        }

        private String toString(Map<String, ByteBuffer> keys)
        {
            String result = "";
            try
            {
                for (ByteBuffer key : keys.values())
                    result = result + ByteBufferUtil.string(key) + ":";
            }
            catch (CharacterCodingException e)
            {
                logger.error("Failed to print keys", e);
            }
            return result;
        }
    }

    public static class ReducerToFilesystem extends Reducer<Text, IntWritable, Text, IntWritable>
    {
        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException
        {
            int sum = 0;
            for (IntWritable val : values)
                sum += val.get();
            context.write(key, new IntWritable(sum));
        }
    }

    public static class ReducerToCassandra extends Reducer<Text, IntWritable, Map<String, ByteBuffer>, List<ByteBuffer>>
    {
        private Map<String, ByteBuffer> keys;
        private ByteBuffer key;

        protected void setup(org.apache.hadoop.mapreduce.Reducer.Context context)
                throws IOException, InterruptedException
        {
            keys = new LinkedHashMap<String, ByteBuffer>();
            String[] partitionKeys = context.getConfiguration().get(PRIMARY_KEY).split(",");
            keys.put("row_id1", ByteBufferUtil.bytes(partitionKeys[0]));
            keys.put("row_id2", ByteBufferUtil.bytes(partitionKeys[1]));
        }

        public void reduce(Text word, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException
        {
            int sum = 0;
            for (IntWritable val : values)
                sum += val.get();
            context.write(keys, getBindVariables(word, sum));
        }

        private List<ByteBuffer> getBindVariables(Text word, int sum)
        {
            List<ByteBuffer> variables = new ArrayList<ByteBuffer>();
            keys.put("word", ByteBufferUtil.bytes(word.toString()));
            variables.add(ByteBufferUtil.bytes(String.valueOf(sum)));
            return variables;
        }
    }

    public int run(String[] args) throws Exception
    {
        String outputReducerType = "filesystem";
        if (args != null && args[0].startsWith(OUTPUT_REDUCER_VAR))
        {
            String[] s = args[0].split("=");
            if (s != null && s.length == 2)
                outputReducerType = s[1];
        }
        logger.info("output reducer type: " + outputReducerType);

        Job job = new Job(getConf(), "wordcount");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);

        if (outputReducerType.equalsIgnoreCase("filesystem"))
        {
            job.setCombinerClass(ReducerToFilesystem.class);
            job.setReducerClass(ReducerToFilesystem.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(IntWritable.class);
            FileOutputFormat.setOutputPath(job, new Path(OUTPUT_PATH_PREFIX));
        }
        else
        {
            job.setReducerClass(ReducerToCassandra.class);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(IntWritable.class);
            job.setOutputKeyClass(Map.class);
            job.setOutputValueClass(List.class);
            job.setOutputFormatClass(CqlOutputFormat.class);

            ConfigHelper.setOutputColumnFamily(job.getConfiguration(), KEYSPACE, OUTPUT_COLUMN_FAMILY);
            job.getConfiguration().set(PRIMARY_KEY, "word,sum");
            String query = "UPDATE " + KEYSPACE + "." + OUTPUT_COLUMN_FAMILY +
                           " SET count_num = ? ";
            CqlConfigHelper.setOutputCql(job.getConfiguration(), query);
            ConfigHelper.setOutputInitialAddress(job.getConfiguration(), "localhost");
            ConfigHelper.setOutputPartitioner(job.getConfiguration(), "Murmur3Partitioner");
        }

        job.setInputFormatClass(CqlPagingInputFormat.class);
        ConfigHelper.setInputRpcPort(job.getConfiguration(), "9160");
        ConfigHelper.setInputInitialAddress(job.getConfiguration(), "localhost");
        ConfigHelper.setInputColumnFamily(job.getConfiguration(), KEYSPACE, COLUMN_FAMILY);
        ConfigHelper.setInputPartitioner(job.getConfiguration(), "Murmur3Partitioner");
        CqlConfigHelper.setInputCQLPageRowSize(job.getConfiguration(), "3");

        // This is the user-defined filter clause; comment it out if you want to count all titles.
        CqlConfigHelper.setInputWhereClauses(job.getConfiguration(), "title='A'");

        job.waitForCompletion(true);
        return 0;
    }
}
It compiles fine, but I get this error:
Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/cassandra/hadoop/cql3/CqlPagingInputFormat
at WordCount.run(WordCount.java:230)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:65)
at WordCount.main(WordCount.java:94)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.util.RunJar.main(RunJar.java:160)
Caused by: java.lang.ClassNotFoundException: org.apache.cassandra.hadoop.cql3.CqlPagingInputFormat
at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
at java.security.AccessController.doPrivileged(Native Method)
at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
... 8 more
I am using Hadoop 1.2.1 and Cassandra 2.0.4.
Help with this error, or sample code or instructions for getting Hadoop MapReduce to work with Cassandra, would be appreciated.
To solve the problem, copy the Cassandra jar files to the Hadoop lib directory, or add them to Hadoop's classpath by putting the following line in /< hadoop path >/conf/hadoop-env.sh:
export HADOOP_CLASSPATH=/< path to cassandra >/lib/*:$HADOOP_CLASSPATH

getting Null Pointer Exception while doing Secondary sort

I'm just a beginner in Hadoop, and I'm getting a NullPointerException while performing a secondary sort.
This is my mapper class:
public void map(LongWritable key, Text value,
                OutputCollector<Text, Employee> outputCollector, Reporter reporter)
        throws IOException {
    String employeeId = value.toString().split(",")[0];
    String employeeName = value.toString().split(",")[1];
    String employeeDept = value.toString().split(",")[2];
    String employeejoinDate = value.toString().split(",")[3];
    String employeSalary = value.toString().split(",")[4];

    Employee employee = new Employee(Integer.parseInt(employeeId), employeeName,
            employeeDept, employeejoinDate, Integer.parseInt(employeSalary));
    outputCollector.collect(new Text(employeeName), employee);
}
This is my reducer:
public void reduce(Text arg0, Iterator<Employee> arg1,
                   OutputCollector<NullWritable, IntWritable> arg2, Reporter arg3)
        throws IOException {
    System.out.println("inside reducer");
    while (arg1.hasNext()) {
        arg2.collect(NullWritable.get(), new IntWritable(arg1.next().getEmployeeSalary()));
    }
}
This is my Employee class:
public class Employee implements WritableComparable<Employee> {
    private int employeeId;
    private String employeeName;
    private String employeeDept;
    private String employeeJoinDt;
    private int employeeSalary;

    public Employee(int employeeId, String employeeName, String employeeDept,
                    String employeeJoinDt, int employeeSalary) {
        this.employeeId = employeeId;
        this.employeeName = employeeName;
        this.employeeDept = employeeDept;
        this.employeeJoinDt = employeeJoinDt;
        this.employeeSalary = employeeSalary;
    }

    public int getEmployeeId() {
        return employeeId;
    }

    public void setEmployeeId(int employeeId) {
        this.employeeId = employeeId;
    }

    public String getEmployeeName() {
        return employeeName;
    }

    public void setEmployeeName(String employeeName) {
        this.employeeName = employeeName;
    }

    public String getEmployeeDept() {
        return employeeDept;
    }

    public void setEmployeeDept(String employeeDept) {
        this.employeeDept = employeeDept;
    }

    public String getEmployeeJoinDt() {
        return employeeJoinDt;
    }

    public void setEmployeeJoinDt(String employeeJoinDt) {
        this.employeeJoinDt = employeeJoinDt;
    }

    public int getEmployeeSalary() {
        return employeeSalary;
    }

    public void setEmployeeSalary(int employeeSalary) {
        this.employeeSalary = employeeSalary;
    }

    @Override
    public void readFields(DataInput input) throws IOException {
        this.employeeId = input.readInt();
        this.employeeName = input.readUTF();
        this.employeeDept = input.readUTF();
        this.employeeJoinDt = input.readUTF();
        this.employeeSalary = input.readInt();
    }

    @Override
    public void write(DataOutput output) throws IOException {
        output.writeInt(this.employeeId);
        output.writeUTF(this.employeeName);
        output.writeUTF(this.employeeDept);
        output.writeUTF(this.employeeJoinDt);
        output.writeInt(this.employeeSalary);
    }

    public int compareTo(Employee employee) {
        if (this.employeeSalary > employee.getEmployeeSalary())
            return 1;
        else if (this.employeeSalary < employee.getEmployeeSalary())
            return -1;
        else
            return 0;
    }
}
This is my sort comparator class:
public class SecondarySortComparator extends WritableComparator {
    public SecondarySortComparator() {
        super(Employee.class);
        System.out.println("sort");
    }

    @Override
    public int compare(WritableComparable a, WritableComparable b) {
        Employee employee1 = (Employee) a;
        Employee employee2 = (Employee) b;
        int i = employee1.getEmployeeSalary() > employee2.getEmployeeSalary() ? 1 : -1;
        return i;
    }
}
This is my grouping comparator class:
public class SecondarySortGroupingComparator extends WritableComparator {
    public SecondarySortGroupingComparator() {
        super(Employee.class, true);
        System.out.println("group");
    }

    @Override
    public int compare(WritableComparable a, WritableComparable b) {
        Employee employee1 = (Employee) a;
        Employee employee2 = (Employee) b;
        return employee1.getEmployeeName().compareTo(employee2.getEmployeeName());
    }
}
This is the error I am getting:
13/09/01 19:13:47 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
13/09/01 19:13:47 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
13/09/01 19:13:47 WARN mapred.JobClient: No job jar file set. User classes may not be found. See JobConf(Class) or JobConf#setJar(String).
13/09/01 19:13:47 INFO mapred.FileInputFormat: Total input paths to process : 1
13/09/01 19:13:47 INFO mapred.JobClient: Running job: job_local_0001
13/09/01 19:13:47 INFO util.ProcessTree: setsid exited with exit code 0
13/09/01 19:13:47 INFO mapred.Task: Using ResourceCalculatorPlugin : org.apache.hadoop.util.LinuxResourceCalculatorPlugin@1b3f8f6
13/09/01 19:13:47 INFO mapred.MapTask: numReduceTasks: 1
13/09/01 19:13:47 INFO mapred.MapTask: io.sort.mb = 100
13/09/01 19:13:48 INFO mapred.JobClient: map 0% reduce 0%
13/09/01 19:13:48 INFO mapred.MapTask: data buffer = 79691776/99614720
sort13/09/01 19:13:48 INFO mapred.MapTask: record buffer = 262144/327680
1
1
1
1
13/09/01 19:13:49 INFO mapred.MapTask: Starting flush of map output
13/09/01 19:13:49 WARN mapred.LocalJobRunner: job_local_0001
java.lang.NullPointerException
at org.apache.hadoop.io.WritableComparator.compare(WritableComparator.java:96)
at org.apache.hadoop.mapred.MapTask$MapOutputBuffer.compare(MapTask.java:1111)
at org.apache.hadoop.util.QuickSort.sortInternal(QuickSort.java:70)
at org.apache.hadoop.util.QuickSort.sort(QuickSort.java:59)
at org.apache.hadoop.mapred.MapTask$MapOutputBuffer.sortAndSpill(MapTask.java:1399)
at org.apache.hadoop.mapred.MapTask$MapOutputBuffer.flush(MapTask.java:1298)
at org.apache.hadoop.mapred.MapTask.runOldMapper(MapTask.java:437)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:372)
at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:212)
13/09/01 19:13:49 INFO mapred.JobClient: Job complete: job_local_0001
13/09/01 19:13:49 INFO mapred.JobClient: Counters: 0
13/09/01 19:13:49 INFO mapred.JobClient: Job Failed: NA
Exception in thread "main" java.io.IOException: Job failed!
at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:1265)
at secondarysort.JobRunner.main(JobRunner.java:31)
Any suggestions on how to solve this problem?
Thanks in advance.
This line seems to cause the problem:
context.write(new Text(employeeName), employee);
You are emitting the employee object (of type Employee) as the value, not the key, and both SecondarySortComparator and SecondarySortGroupingComparator operate on your keys, not your values. Hence the main problem is that you are passing a Text as the key, and that is causing the issue. Consider passing the employee object as the key instead of Text so the two comparators actually work.
You might also want to add a default constructor to your Employee class:
public Employee() { }
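Putting both suggestions together, a hedged sketch of what the map output and job wiring could look like (the NullWritable value and the conf variable name are illustrative assumptions, not from the original post):

    // In the mapper: emit the whole Employee as the key so the two
    // comparators (which only ever see keys) can sort and group on it.
    Employee employee = new Employee(Integer.parseInt(employeeId), employeeName,
            employeeDept, employeejoinDate, Integer.parseInt(employeSalary));
    outputCollector.collect(employee, NullWritable.get());

    // In the driver (old mapred API), assuming a JobConf named conf:
    conf.setMapOutputKeyClass(Employee.class);
    conf.setMapOutputValueClass(NullWritable.class);
    conf.setOutputKeyComparatorClass(SecondarySortComparator.class);              // sort order
    conf.setOutputValueGroupingComparator(SecondarySortGroupingComparator.class); // grouping

This also implies changing the mapper's OutputCollector<Text, Employee> to OutputCollector<Employee, NullWritable> to match.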

RestAPI and Neo4j

Can anyone help with these Neo4j (1.9) errors with the RestAPI, using JDK 1.6 and Heroku?
The objective of the code is only to demo the use of Cypher and Neo4j from Java. I understand that only the RestAPI works on Heroku.
package com.example;

import java.util.List;
import java.util.Map;

import org.neo4j.graphdb.Direction;
import org.neo4j.graphdb.GraphDatabaseService;
import org.neo4j.graphdb.Node;
import org.neo4j.graphdb.Relationship;
import org.neo4j.graphdb.RelationshipType;
import org.neo4j.graphdb.Transaction;
import org.neo4j.graphdb.factory.GraphDatabaseFactory;
import org.neo4j.helpers.collection.*;
import org.neo4j.rest.graphdb.RestAPI;
import org.neo4j.rest.graphdb.RestAPIFacade;
import org.neo4j.rest.graphdb.RestGraphDatabase;
import org.neo4j.rest.graphdb.query.RestCypherQueryEngine;
import org.neo4j.rest.graphdb.util.*;
import org.springframework.stereotype.Component;

import com.example.IDatabaseConnector;
//import org.neo4j.rest.graphdb.RestGraphDatabase;
//import org.neo4j.

@Component
public class Neo4jDatabaseConnector implements IDatabaseConnector {
    Node aNode;
    Node first;
    Node second;
    Relationship relation;
    private static GraphDatabaseService GraphDb;
    private static RestAPI restAPI;
    Transaction txn;

    private static enum RelTypes implements RelationshipType {
        KNOWS,
        USAGE
    }

    public void showNode() {
        // have to add code for cypher
        RestCypherQueryEngine rcqe = new RestCypherQueryEngine(restAPI);
        // Then just execute your cypher statements using rcqe.query().
        // int length = 0;
        String countNodes = "START n=node(*) RETURN count(*)";
        String createNodes = "CREATE (n {name: 'Trevor Oakley'})";
        rcqe.query(createNodes, null);
        // QueryResult<Map<String,Object>> result = rcqe.query(countNodes, null);
        // for (Map<String, Object> row : result) {
        //     length++;
        // }
        // System.out.println("length=" + length);
        // rcqe.query(countNodes, Map<String, Object> row:result);
        // rcqe.query(countNodes, Map<String,Object>:result)
        // rcqe.
        /* ExecutionEngine engine = new ExecutionEngine(GraphDb);
        int length = -1;
        try {
            ExecutionResult result = engine.execute("start n=node(*) return n");
            if (result == null) {
                length = 0;
            } else {
                for (@SuppressWarnings("unused")
                Map<String, Object> row : result) {
                    length++;
                }
            }
        } catch (NullPointerException e) {
            // _log.error(e.getMessage(), e);
            length = -2;
        }
        System.out.println("len=" + length);
        */
    }

    public Neo4jDatabaseConnector() {
        // GraphDb = new GraphDatabaseFactory().newEmbeddedDatabase(Neo4j_DBPath);
    }

    public int createDBRest() {
        restAPI = new RestAPIFacade(System.getenv("NEO4J_URL"),
                                    System.getenv("NEO4J_USERNAME"),
                                    System.getenv("NEO4J_PASSWORD"));
        try {
            GraphDb = new RestGraphDatabase(restAPI);
        } catch (Exception e) {
            return 1;
        }
        return 0;
    }

    public int createDB(String dbLoc) {
        System.out.println("db loc =" + dbLoc);
        int error = 0;
        try {
            GraphDb = new GraphDatabaseFactory().newEmbeddedDatabase(dbLoc);
            // GraphDb = new GraphDatabaseFactory().newEmbeddedDatabaseBuilder(dbLoc);
            System.out.println("db loc =" + dbLoc);
            String stringDB = GraphDb.toString();
            System.out.println("connection =" + stringDB);
        } catch (Exception e) {
            System.out.println("error - ");
            error = 1;
        }
        return error;
    }
}
Problem accessing /hello. Reason:
Error reading as JSON ''
Caused by:
java.lang.RuntimeException: Error reading as JSON ''
at org.neo4j.rest.graphdb.util.JsonHelper.readJson(JsonHelper.java:57)
at org.neo4j.rest.graphdb.util.JsonHelper.jsonToSingleValue(JsonHelper.java:62)
at org.neo4j.rest.graphdb.RequestResult.toEntity(RequestResult.java:114)
at org.neo4j.rest.graphdb.RequestResult.toMap(RequestResult.java:120)
at org.neo4j.rest.graphdb.ExecutingRestRequest.toMap(ExecutingRestRequest.java:187)
at org.neo4j.rest.graphdb.ExecutingRestAPI.query(ExecutingRestAPI.java:475)
at org.neo4j.rest.graphdb.ExecutingRestAPI.query(ExecutingRestAPI.java:495)
at org.neo4j.rest.graphdb.RestAPIFacade.query(RestAPIFacade.java:233)
at org.neo4j.rest.graphdb.query.RestCypherQueryEngine.query(RestCypherQueryEngine.java:50)
at com.example.Neo4jDatabaseConnector.showNode(Neo4jDatabaseConnector.java:53)
at com.example.HelloServlet.doGet(HelloServlet.java:22)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:707)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:820)
at org.eclipse.jetty.servlet.ServletHolder.handle(ServletHolder.java:565)
at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:479)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:119)
at org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:521)
at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:227)
at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1031)
at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:406)
at org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:186)
at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:965)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:117)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:111)
at org.eclipse.jetty.server.Server.handle(Server.java:349)
at org.eclipse.jetty.server.AbstractHttpConnection.handleRequest(AbstractHttpConnection.java:449)
at org.eclipse.jetty.server.AbstractHttpConnection$RequestHandler.headerComplete(AbstractHttpConnection.java:910)
at org.eclipse.jetty.http.HttpParser.parseNext(HttpParser.java:634)
at org.eclipse.jetty.http.HttpParser.parseAvailable(HttpParser.java:230)
at org.eclipse.jetty.server.AsyncHttpConnection.handle(AsyncHttpConnection.java:76)
at org.eclipse.jetty.io.nio.SelectChannelEndPoint.handle(SelectChannelEndPoint.java:609)
at org.eclipse.jetty.io.nio.SelectChannelEndPoint$1.run(SelectChannelEndPoint.java:45)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:599)
at org.eclipse.jetty.util.thread.QueuedThreadPool$3.run(QueuedThreadPool.java:534)
at java.lang.Thread.run(Thread.java:679)
Caused by: java.io.EOFException: No content to map to Object due to end of input
at org.codehaus.jackson.map.ObjectMapper._initForReading(ObjectMapper.java:2775)
at org.codehaus.jackson.map.ObjectMapper._readMapAndClose(ObjectMapper.java:2718)
at org.codehaus.jackson.map.ObjectMapper.readValue(ObjectMapper.java:1863)
at org.neo4j.rest.graphdb.util.JsonHelper.readJson(JsonHelper.java:55)
... 34 more
Caused by:
java.io.EOFException: No content to map to Object due to end of input
at org.codehaus.jackson.map.ObjectMapper._initForReading(ObjectMapper.java:2775)
at org.codehaus.jackson.map.ObjectMapper._readMapAndClose(ObjectMapper.java:2718)
at org.codehaus.jackson.map.ObjectMapper.readValue(ObjectMapper.java:1863)
at org.neo4j.rest.graphdb.util.JsonHelper.readJson(JsonHelper.java:55)
at org.neo4j.rest.graphdb.util.JsonHelper.jsonToSingleValue(JsonHelper.java:62)
at org.neo4j.rest.graphdb.RequestResult.toEntity(RequestResult.java:114)
at org.neo4j.rest.graphdb.RequestResult.toMap(RequestResult.java:120)
at org.neo4j.rest.graphdb.ExecutingRestRequest.toMap(ExecutingRestRequest.java:187)
at org.neo4j.rest.graphdb.ExecutingRestAPI.query(ExecutingRestAPI.java:475)
at org.neo4j.rest.graphdb.ExecutingRestAPI.query(ExecutingRestAPI.java:495)
at org.neo4j.rest.graphdb.RestAPIFacade.query(RestAPIFacade.java:233)
at org.neo4j.rest.graphdb.query.RestCypherQueryEngine.query(RestCypherQueryEngine.java:50)
at com.example.Neo4jDatabaseConnector.showNode(Neo4jDatabaseConnector.java:53)
at com.example.HelloServlet.doGet(HelloServlet.java:22)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:707)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:820)
at org.eclipse.jetty.servlet.ServletHolder.handle(ServletHolder.java:565)
at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:479)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:119)
at org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:521)
at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:227)
at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1031)
at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:406)
at org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:186)
at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:965)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:117)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:111)
at org.eclipse.jetty.server.Server.handle(Server.java:349)
at org.eclipse.jetty.server.AbstractHttpConnection.handleRequest(AbstractHttpConnection.java:449)
at org.eclipse.jetty.server.AbstractHttpConnection$RequestHandler.headerComplete(AbstractHttpConnection.java:910)
at org.eclipse.jetty.http.HttpParser.parseNext(HttpParser.java:634)
at org.eclipse.jetty.http.HttpParser.parseAvailable(HttpParser.java:230)
at org.eclipse.jetty.server.AsyncHttpConnection.handle(AsyncHttpConnection.java:76)
at org.eclipse.jetty.io.nio.SelectChannelEndPoint.handle(SelectChannelEndPoint.java:609)
at org.eclipse.jetty.io.nio.SelectChannelEndPoint$1.run(SelectChannelEndPoint.java:45)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:599)
at org.eclipse.jetty.util.thread.QueuedThreadPool$3.run(QueuedThreadPool.java:534)
at java.lang.Thread.run(Thread.java:679)
In terms of the immediate error, the answer was to fill in the user and password fields by parsing them out of the connection URL, as shown below:
String neo4jURL = System.getenv("NEO4J_URL");
String user = "user";
String password = "password";
try {
    URL url = new URL(neo4jURL);
    String userInfo = url.getUserInfo();
    String[] userDetails = userInfo.split(":");
    user = userDetails[0];
    password = userDetails[1];
} catch (MalformedURLException e) {
    //
}
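The parsed values can then replace the separate environment variables used in createDBRest() above; a minimal sketch, assuming the same fields as in the question:

    // Connect using the credentials parsed out of NEO4J_URL
    restAPI = new RestAPIFacade(neo4jURL, user, password);
    GraphDb = new RestGraphDatabase(restAPI);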

Error with JSF+Spring+hibernate

I am trying to integrate JSF with Spring and Hibernate.
The following is my code:
LeaveBean.java
package com.nagra.bean;
import java.io.Serializable;
import java.sql.SQLException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Properties;
import javax.annotation.Resource;
import javax.faces.bean.ManagedBean;
import javax.faces.bean.SessionScoped;
import javax.mail.Message;
import javax.mail.MessagingException;
import javax.mail.Session;
import javax.mail.Transport;
import javax.mail.internet.InternetAddress;
import javax.mail.internet.MimeMessage;
import org.hibernate.HibernateException;
import com.nagra.leaveapp.BO.LeaveInfoBo;
import com.nagra.leaveapp.model.LeaveInfo;
#ManagedBean(name="customer")
#SessionScoped
public class LeaveBean implements Serializable{
/**
*
*/
//private LeaveAppDo leaveAppDo=new LeaveAppDo();
LeaveInfoBo leaveinfoBo;
public LeaveInfoBo getLeaveinfoBo() {
return leaveinfoBo;
}
public void setLeaveinfoBo(LeaveInfoBo leaveinfoBo) {
this.leaveinfoBo = leaveinfoBo;
}
private String total_sick_leave;
private String total_paidoff_leave;
private String start_date;
private String end_date;
private String reason;
private String status;
private Date secondDate;
public Date getSecondDate() {
return secondDate;
}
public void setSecondDate(Date secondDate) {
this.secondDate = secondDate;
}
public String getTotal_sick_leave() {
return total_sick_leave;
}
public void setTotal_sick_leave(String total_sick_leave) {
this.total_sick_leave = total_sick_leave;
}
public String getTotal_paidoff_leave() {
return total_paidoff_leave;
}
public void setTotal_paidoff_leave(String total_paidoff_leave) {
this.total_paidoff_leave = total_paidoff_leave;
}
public String getStart_date() {
return start_date;
}
public void setStart_date(String start_date) {
this.start_date = start_date;
}
public String getEnd_date() {
return end_date;
}
public void setEnd_date(String end_date) {
this.end_date = end_date;
}
public String getReason() {
return reason;
}
public void setReason(String reason) {
this.reason = reason;
}
public String getStatus() {
return status;
}
public void setStatus(String status) {
this.status = status;
}
private static final long serialVersionUID = 1L;
//resource injection
#Resource(name="jdbc/leaveapp")
//private DataSource ds;
//if resource inject is not support, you still can get it manually.
/*public CustomerBean(){
try {
Context ctx = new InitialContext();
ds = (DataSource)ctx.lookup("jdbc:mysql://localhost:3306/leaveapp");
} catch (NamingException e) {
e.printStackTrace();
}
}*/
public List<LeaveInfo> getCustomerList() throws SQLException{
System.out.println("Getting In " );
return null;
// return leaveinfoBo.retrieveData();
}
public String addCustomer() throws HibernateException
{
/* String url = "jdbc:mysql://localhost:3306/leaveapp";
Connection con = DriverManager.getConnection(url,"root","root");
System.out.println(con);
if(con==null)
throw new SQLException("Can't get database connection");
String sql="insert into leaveinformation(start_date,end_date,reason,status)values(?,?,?,?)";
java.util.Date utilDate = new Date();
// Convert it to java.sql.Date
java.sql.Date date = new java.sql.Date(utilDate.getTime());
java.sql.Timestamp sqlDate = new java.sql.Timestamp(new java.util.Date().getTime());*/
DateFormat formatter ;
try
{
System.out.println("Start date " + getStart_date());
formatter = new SimpleDateFormat("yyyy-MM-dd");
Date startDate = formatter.parse(LeaveBean.this.getStart_date());
System.out.println(startDate);
Date endDate = formatter.parse(LeaveBean.this.getEnd_date());
// Days d = Days.daysBetween(startDate,endDate);
// int days = d.getDays();
#SuppressWarnings("deprecation")
java.sql.Date startdate=new java.sql.Date(startDate.getDate());
#SuppressWarnings("deprecation")
java.sql.Date enddate=new java.sql.Date(endDate.getDate());
/* System.out.println("Today is " +date );
PreparedStatement ps
= con.prepareStatement(sql);
ps.setDate(1,startdate,Calendar.getInstance(Locale.ENGLISH));
ps.setDate(2,enddate,Calendar.getInstance(Locale.ENGLISH));
ps.setString(3,CustomerBean.this.reason);
ps.setString(4,"Waiting");*/
LeaveInfo leave = new LeaveInfo();
System.out.println("Entering addCustomer() ");
leave.setEnd_date(enddate);
leave.setStart_date(startdate);
leave.setReason(reason);
leave.setStatus("Waiting");
System.out.println(leave.toString());
leaveinfoBo.save(leave);
clearall();
// Insert the row
/* ps.executeUpdate();
con.close();*/
sendmail();
}
catch(Exception e)
{
System.out.println("There is an error " + e.getStackTrace().toString());
e.printStackTrace();
}
return "";
}
private void clearall() {
// TODO Auto-generated method stub
setEnd_date("");
setStart_date("");
setReason("");
setStatus("");
}
public String sendmail()
{
// Recipient's email ID needs to be mentioned.
String to = "nsbharathi88#gmail.com";
// Sender's email ID needs to be mentioned
String from = "web#gmail.com";
// Assuming you are sending email from localhost
String host = "localhost";
// Get system properties
Properties properties = System.getProperties();
// Setup mail server
properties.setProperty("mail.smtp.host", host);
// Get the default Session object.
Session session = Session.getDefaultInstance(properties);
try{
// Create a default MimeMessage object.
MimeMessage message = new MimeMessage(session);
// Set From: header field of the header.
message.setFrom(new InternetAddress(from));
// Set To: header field of the header.
message.addRecipient(Message.RecipientType.TO,
new InternetAddress(to));
// Set Subject: header field
message.setSubject("This is the Subject Line!");
// Now set the actual message
message.setText("This is actual message");
// Send message
Transport.send(message);
System.out.println("Sent message successfully....");
}catch (MessagingException mex) {
mex.printStackTrace();
}
return "";
}
}
applicationContext.xml
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans-2.5.xsd">
<!-- Database Configuration -->
<import resource="classes/spring/database/DataSource.xml"/>
<import resource="classes/spring/database/Hibernate.xml"/>
<!-- Beans Declaration -->
<import resource="classes/spring/bean/LeaveApp.xml"/>
</beans>
I am getting the following error when I run this on the Tomcat server:
Start date 2012-02-03
java.lang.NullPointerException
at com.nagra.bean.LeaveBean.addCustomer(LeaveBean.java:167)
Fri Feb 03 00:00:00 IST 2012
Entering addCustomer()
LeaveInfo [id=0, total_sick_leave=15, total_paidoff_leave=15, start_date=1970-01- 01, end_date=1970-01-01, reason=cfklsn, status=Waiting]
There is an error [Ljava.lang.StackTraceElement;@fc644a
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.el.parser.AstValue.invoke(AstValue.java:262)
at org.apache.el.MethodExpressionImpl.invoke(MethodExpressionImpl.java:278)
at com.sun.faces.facelets.el.TagMethodExpression.invoke(TagMethodExpression.java:105)
at javax.faces.component.MethodBindingMethodExpressionAdapter.invoke(MethodBindingMethodExpressionAdapter.java:88)
at com.sun.faces.application.ActionListenerImpl.processAction(ActionListenerImpl.java:102)
at javax.faces.component.UICommand.broadcast(UICommand.java:315)
at javax.faces.component.UIViewRoot.broadcastEvents(UIViewRoot.java:794)
at javax.faces.component.UIViewRoot.processApplication(UIViewRoot.java:1259)
at com.sun.faces.lifecycle.InvokeApplicationPhase.execute(InvokeApplicationPhase.java:81)
at com.sun.faces.lifecycle.Phase.doPhase(Phase.java:101)
at com.sun.faces.lifecycle.LifecycleImpl.execute(LifecycleImpl.java:118)
at javax.faces.webapp.FacesServlet.service(FacesServlet.java:593)
at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:304)
at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:210)
at org.apache.catalina.core.StandardWrapperValve.invoke(StandardWrapperValve.java:240)
at org.apache.catalina.core.StandardContextValve.invoke(StandardContextValve.java:164)
at org.apache.catalina.authenticator.AuthenticatorBase.invoke(AuthenticatorBase.java:462)
at org.apache.catalina.core.StandardHostValve.invoke(StandardHostValve.java:164)
at org.apache.catalina.valves.ErrorReportValve.invoke(ErrorReportValve.java:100)
at org.apache.catalina.valves.AccessLogValve.invoke(AccessLogValve.java:562)
at org.apache.catalina.core.StandardEngineValve.invoke(StandardEngineValve.java:118)
at org.apache.catalina.connector.CoyoteAdapter.service(CoyoteAdapter.java:395)
at org.apache.coyote.http11.Http11Processor.process(Http11Processor.java:250)
at org.apache.coyote.http11.Http11Protocol$Http11ConnectionHandler.process(Http11Protocol.java:188)
at org.apache.coyote.http11.Http11Protocol$Http11ConnectionHandler.process(Http11Protocol.java:166)
at org.apache.tomcat.util.net.JIoEndpoint$SocketProcessor.run(JIoEndpoint.java:302)
at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908)
at java.lang.Thread.run(Thread.java:619)
I think your startdate and enddate are of type String. Change them to java.util.Date and try again.
start_date=1970-01- 01
Your start date has a space in it. It won't parse.
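For illustration only (this goes slightly beyond the answers above), a minimal, self-contained sketch of parsing such a string defensively, assuming the same yyyy-MM-dd format as the bean:

    import java.text.SimpleDateFormat;

    public class DateParseDemo {
        public static void main(String[] args) throws Exception {
            SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd");
            fmt.setLenient(false);              // fail fast on malformed input

            String raw = "1970-01- 01";         // note the stray space from the log
            java.util.Date parsed = fmt.parse(raw.replaceAll("\\s+", ""));

            // Convert via getTime(); the deprecated Date.getDate() used in
            // addCustomer() returns a day-of-month, not a timestamp, which is
            // why the log above shows 1970-01-01 dates.
            java.sql.Date sqlDate = new java.sql.Date(parsed.getTime());
            System.out.println(sqlDate);        // prints 1970-01-01
        }
    }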
