import java.io.*;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.lib.db.DBWritable;
public class DBInputWritable implements Writable, DBWritable
{
String symbol;
String date;
double open;
double high;
double low;
double close;
int volume;
double adjClose;
//private final static SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
public void readFields(DataInput in) throws IOException
{
symbol=in.readLine();
date=in.readLine();
open=in.readDouble();
high=in.readDouble();
low=in.readDouble();
close=in.readDouble();
volume=in.readInt();
adjClose=in.readDouble();
}
public void readFields(ResultSet rs) throws SQLException
{
symbol = rs.getString(2);
date = rs.getString(3);
open = rs.getDouble(4);
high = rs.getDouble(5);
low = rs.getDouble(6);
close = rs.getDouble(7);
volume = rs.getInt(8);
adjClose = rs.getDouble(9);
}
public void write(DataOutput out) throws IOException
{
}
public void write( PreparedStatement ps) throws SQLException
{
}
public String getSymbol()
{
return symbol;
}
public String getDate()
{
return date;
}
public double getOpen()
{
return open;
}
public double getHigh()
{
return high;
}
public double getLow()
{
return low;
}
public double getClose()
{
return close;
}
public int getVolume()
{
return volume;
}
public double getAdjClose()
{
return adjClose;
}
}
public class DBOutputWritable implements Writable, DBWritable
{
String symbol;
String date;
double open;
double high;
double low;
double close;
int volume;
double adjClose;
public DBOutputWritable(String symbol,String date,String open,String high,String low,String close,String volume,String adjClose)
{
this.symbol=symbol;
this.date=date;
this.open=Double.parseDouble(open);
this.high=Double.parseDouble(high);
this.low=Double.parseDouble(low);
this.close=Double.parseDouble(close);
this.volume=Integer.parseInt(volume);
this.adjClose=Double.parseDouble(adjClose);
}
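// Both readFields methods below are left empty on purpose: this class is only ever written to the database.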
public void readFields(DataInput in) throws IOException
{
}
public void readFields(ResultSet rs) throws SQLException
{
}
public void write(DataOutput out) throws IOException
{
out.writeChars(symbol);
out.writeChars(date);
out.writeDouble(open);
out.writeDouble(high);
out.writeDouble(low);
out.writeDouble(close);
out.writeInt(volume);
out.writeDouble(adjClose);
}
public void write(PreparedStatement ps) throws SQLException
{
ps.setString(1,symbol);
ps.setString(2,date);
ps.setDouble(3,open);
ps.setDouble(4,high);
ps.setDouble(5,low);
ps.setDouble(6,close);
ps.setInt(7,volume);
ps.setDouble(8,adjClose);
}
}
public class Map extends Mapper<LongWritable,DBInputWritable,Text,Text>
{
public void map(LongWritable key, DBInputWritable value, Context ctx)
{
try
{
Text set;
set= new Text(value.getDate());
String line = value.getSymbol()+","+value.getDate()+","+value.getOpen()+","+value.getHigh()+","+value.getLow()+","+value.getClose()+","+value.getVolume()+","+value.getAdjClose();
ctx.write(set,new Text(line));
}
catch(IOException e)
{
e.printStackTrace();
}
catch(InterruptedException e)
{
e.printStackTrace();
}
}
}
public class Reduce extends Reducer<Text, Text, DBOutputWritable, NullWritable>
{
public void reduce(Text key, Text value, Context ctx)
{
try
{
String []line= value.toString().split(",");
String sym=line[0];
String dt=line[1];
String opn=line[2];
String hgh=line[3];
String lw=line[4];
String cls=line[5];
String vlm=line[6];
String adcls=line[7];
ctx.write(new DBOutputWritable(sym,dt,opn,hgh,lw,cls,vlm,adcls),NullWritable.get());
}
catch(IOException e)
{
e.printStackTrace();
}
catch(InterruptedException e)
{
e.printStackTrace();
}
}
}
public class Main
{
public static void main(String [] args) throws Exception
{
Configuration conf = new Configuration();
DBConfiguration.configureDB(conf,
"com.mysql.jdbc.Driver", //Driver Class
"jdbc:mysql://192.168.198.128:3306/testDb", //DB URL
"sqoopuser", //USERNAME
"passphrase"); //PASSWORD
Job job = new Job(conf);
job.setJarByClass(Main.class);
job.setMapperClass(Map.class);
job.setReducerClass(Reduce.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setOutputKeyClass(DBOutputWritable.class);
job.setOutputValueClass(NullWritable.class);
job.setInputFormatClass(DBInputFormat.class);
job.setOutputFormatClass(DBOutputFormat.class);
DBInputFormat.setInput(
job,
DBInputWritable.class,
"aapldata", //input table name
null,
null,
new String[] {"stock","symbol", "date" ,"open", "high", "low", "close", "volume", "adjClose"}
//Table Columns
);
DBOutputFormat.setOutput(
job,
"aapldatanew", //Output Table Name
new String[] {"symbol", "date" ,"open", "high", "low", "close", "volume", "adjClose"}
//Table Columns
);
System.exit(job.waitForCompletion(true) ? 0 : 1);
}
}
I think the code is picture perfect, but I still encounter the error below:
14/11/26 22:09:47 INFO mapred.JobClient: map 100% reduce 0%
14/11/26 22:09:55 INFO mapred.JobClient: map 100% reduce 33%
14/11/26 22:09:58 INFO mapred.JobClient: Task Id : attempt_201411262208_0001_r_000000_2, Status : FAILED
java.lang.ClassCastException: org.apache.hadoop.io.Text cannot be cast to org.apache.hadoop.mapreduce.lib.db.DBWritable
at org.apache.hadoop.mapreduce.lib.db.DBOutputFormat$DBRecordWriter.write(DBOutputFormat.java:66)
at org.apache.hadoop.mapred.ReduceTask$NewTrackingRecordWriter.write(ReduceTask.java:586)
at org.apache.hadoop.mapreduce.TaskInputOutputContext.write(TaskInputOutputContext.java:80)
at org.apache.hadoop.mapreduce.Reducer.reduce(Reducer.java:156)
at org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:177)
at org.apache.hadoop.mapred.ReduceTask.runNewReducer(ReduceTask.java:649)
at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:418)
at org.apache.hadoop.mapred.Child$4.run(Child.java:255)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1190)
at org.apache.hadoop.mapred.Child.main(Child.java:249)
I need your valuable insights.
In your map class, take the input value as Text instead of DBInputWritable:
public class Map extends Mapper<LongWritable, Text, Text, Text> {
public void map(LongWritable key, Text value, Context ctx)
I can find 2 problems:
The mapper's output key/value classes do not match the job configuration. Please check it.
Either fix the mapper or the job configuration, as your needs dictate. I address the second problem below, assuming the mapper key/value pair you chose is the right one.
You didn't override the reduce method!
As per your job configuration, the signature should be:
public void reduce(Text key, Iterable<Text> values, Context context){
//...your code
}
Explanation for your exception
Since you didn't override Reducer's reduce method, the reducer runs with the default implementation (called the identity reducer). Source code:
protected void reduce(KEYIN key, Iterable<VALUEIN> values, Context context
) throws IOException, InterruptedException {
for(VALUEIN value: values) {
context.write((KEYOUT) key, (VALUEOUT) value);
}
}
As the source code shows, it simply iterates over the values and writes them out using the configured output key/value classes. But in your case the intermediate pairs (i.e. Text, Text) do not match the pairs expected by DBOutputFormat.
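For concreteness, here is a hedged sketch of how the Reduce class from the question could be rewritten with that signature (a sketch only, assuming the same comma-separated layout the mapper emits):
import java.io.IOException;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class Reduce extends Reducer<Text, Text, DBOutputWritable, NullWritable> {
    @Override
    public void reduce(Text key, Iterable<Text> values, Context ctx)
            throws IOException, InterruptedException {
        // Each value is one "symbol,date,open,high,low,close,volume,adjClose" record.
        for (Text value : values) {
            String[] line = value.toString().split(",");
            ctx.write(new DBOutputWritable(line[0], line[1], line[2], line[3],
                    line[4], line[5], line[6], line[7]),
                    NullWritable.get());
        }
    }
}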
HTH !
Related
I am using Hadoop 2.7 and I have run into an issue when using a custom Writable, "TextPair" (page 104 of the Definitive Guide). Basically, my program works fine when I use plain Text, whereas it outputs "test.TextTuple#3b86249a test.TextTuple#63cd18fd" when using the TextPair.
Any idea what is wrong with my code (below)?
============
Mapper1:
public class KWMapper extends Mapper<LongWritable, Text, TextTuple, TextTuple> {
@Override
public void map(LongWritable k, Text v, Mapper.Context c) throws IOException, InterruptedException {
String keywordRelRecord[] = v.toString().split(",");
String subTopicID = keywordRelRecord[0];
String paperID = keywordRelRecord[1];
//set the KEY
TextTuple key = new TextTuple();
key.setNaturalKey(new Text(subTopicID));
key.setSecondaryKey(new Text("K"));
//set the VALUE
TextTuple value = new TextTuple();
value.setNaturalKey(new Text(paperID));
value.setSecondaryKey(new Text("K"));
c.write(key, value);
}
Mapper2:
public class TDMapper extends Mapper<LongWritable, Text, TextTuple, TextTuple> {
@Override
public void map(LongWritable k, Text v, Mapper.Context c) throws IOException, InterruptedException {
String topicRecord[] = v.toString().split(",");
String superTopicID = topicRecord[0];
String subTopicID = topicRecord[1].substring(1, topicRecord[1].length() - 1);
TextTuple key = new TextTuple();
key.setNaturalKey(new Text(subTopicID));
key.setSecondaryKey(new Text("T"));
TextTuple value = new TextTuple();
value.setNaturalKey(new Text(superTopicID));
value.setSecondaryKey(new Text("T"));
c.write(key, value);
}
REDUCER :
public class TDKRReducer extends Reducer<TextTuple, TextTuple, Text, Text>{
public void reduce(TextTuple k, Iterable<TextTuple> values, Reducer.Context c) throws IOException, InterruptedException{
for (TextTuple val : values) {
c.write(k.getNaturalKey(), val.getNaturalKey());
}
}
}
DRIVER:
public class TDDriver {
public static void main(String args[]) throws IOException, InterruptedException, ClassNotFoundException {
// This class support the user for the configuration of the execution;
Configuration confStage1 = new Configuration();
Job job1 = new Job(confStage1, "TopDecKeywordRel");
// Setting the driver class
job1.setJarByClass(TDDriver.class);
// Setting the input Files and processing them using the corresponding mapper class
MultipleInputs.addInputPath(job1, new Path(args[0]), TextInputFormat.class, TDMapper.class);
MultipleInputs.addInputPath(job1, new Path(args[1]), TextInputFormat.class, KWMapper.class);
job1.setMapOutputKeyClass(TextTuple.class);
job1.setMapOutputValueClass(TextTuple.class);
// Setting the Reducer Class;
job1.setReducerClass(TDKRReducer.class);
// Setting the output class for the Key-value pairs
job1.setOutputKeyClass(Text.class);
job1.setOutputValueClass(Text.class);
// Setting the output file
Path outputPA = new Path(args[2]);
FileOutputFormat.setOutputPath(job1, outputPA);
// Submitting the Job Monitoring the execution of the Job
System.exit(job1.waitForCompletion(true) ? 0 : 1);
//conf.setPartitionerClass(CustomPartitioner.class);
}
}
CUSTOM VARIABLE
public class TextTuple implements Writable, WritableComparable<TextTuple> {
private Text naturalKey;
private Text secondaryKey;
public TextTuple() {
this.naturalKey = new Text();
this.secondaryKey = new Text();
}
public void setNaturalKey(Text naturalKey) {
this.naturalKey = naturalKey;
}
public void setSecondaryKey(Text secondaryKey) {
this.secondaryKey = secondaryKey;
}
public Text getNaturalKey() {
return naturalKey;
}
public Text getSecondaryKey() {
return secondaryKey;
}
@Override
public void write(DataOutput out) throws IOException {
naturalKey.write(out);
secondaryKey.write(out);
}
@Override
public void readFields(DataInput in) throws IOException {
naturalKey.readFields(in);
secondaryKey.readFields(in);
}
//This comparator controls the sort order of the keys.
@Override
public int compareTo(TextTuple o) {
// comparing the naturalKey
int compareValue = this.naturalKey.compareTo(o.naturalKey);
if (compareValue == 0) {
compareValue = this.secondaryKey.compareTo(o.secondaryKey);
}
return -1 * compareValue;
}
}
I have the following classes for my MR job, but when I run it the job fails with the exception below. Kindly suggest.
public class MongoKey implements WritableComparable<MongoKey> {
...
private Text name;
private Text place;
public MongoKey() {
this.name = new Text();
this.place = new Text();
}
public MongoKey(Text name, Text place) {
this.name = name;
this.place = place;
}
public void readFields(DataInput in) throws IOException {
name.readFields(in);
place.readFields(in);
}
public void write(DataOutput out) throws IOException {
name.write(out);
place.write(out);
}
public int compareTo(MongoKey o) {
MongoKey other = (MongoKey)o;
int cmp = name.compareTo(other.name);
if(cmp != 0){
return cmp;
}
return place.compareTo(other.place);
}
}
public class MongoValue implements Writable {
...
public void readFields(DataInput in) throws IOException {
profession.readFields(in);
}
public void write(DataOutput out) throws IOException {
profession.write(out);
}
}
public class MongoReducer extends Reducer<MongoKey, MongoValue, MongoKey, BSONWritable> {
...
context.write(key, new BSONWritable(output)); // line 41
}
public class MongoHadoopJobRunner extends Configured implements Tool {
public int run(String[] args) throws Exception {
if (args.length != 2) {
System.out.println("usage: [input] [output]");
System.exit(-1);
}
Configuration conf = getConf();
for (String arg : args)
System.out.println(arg);
GenericOptionsParser parser = new GenericOptionsParser(conf, args);
conf.set("mongo.output.uri", "mongodb://localhost/demo.logs_aggregate");
MongoConfigUtil.setOutputURI(conf, "mongodb://localhost/demo.logs_aggregate");
MongoConfigUtil.setOutputFormat(conf, MongoOutputFormat.class);
final Job job = new Job(conf, "mongo_hadoop");
job.setOutputFormatClass(MongoOutputFormat.class);
// Job job = new Job();
job.setJarByClass(MongoHadoopJobRunner.class);
// job.setJobName("mongo_hadoop");
job.setNumReduceTasks(1);
job.setMapperClass(MongoMapper.class);
job.setReducerClass(MongoReducer.class);
job.setMapOutputKeyClass(MongoKey.class);
job.setMapOutputValueClass(MongoValue.class);
job.setOutputKeyClass(MongoKey.class);
job.setOutputValueClass(BSONWritable.class);
job.setInputFormatClass(MongoInputFormat.class);
for (String arg2 : parser.getRemainingArgs()) {
System.out.println("remaining: " + arg2);
}
Path inPath = new Path(parser.getRemainingArgs()[0]);
MongoInputFormat.addInputPath(job, inPath);
job.waitForCompletion(true);
return 0;
}
public static void main(String[] pArgs) throws Exception {
Configuration conf = new Configuration();
for (String arg : pArgs) {
System.out.println(arg);
}
GenericOptionsParser parser = new GenericOptionsParser(conf, pArgs);
for (String arg2 : parser.getRemainingArgs()) {
System.out.println("ree" + arg2);
}
System.exit(ToolRunner.run(conf, new MongoHadoopJobRunner(), parser
.getRemainingArgs()));
}
}
With the following exception
java.lang.Exception: java.lang.IllegalArgumentException: can't serialize class com.name.custom.MongoKey
...
...
at com.mongodb.hadoop.output.MongoRecordWriter.write(MongoRecordWriter.java:93)
at org.apache.hadoop.mapred.ReduceTask$NewTrackingRecordWriter.write(ReduceTask.java:558)
at org.apache.hadoop.mapreduce.task.TaskInputOutputContextImpl.write(TaskInputOutputContextImpl.java:89)
at org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer$Context.write(WrappedReducer.java:105)
at com.name.custom.MongoReducer.reduce(MongoReducer.java:41)
at com.name.custom.MongoReducer.reduce(MongoReducer.java:11)
It seems there should not be any issue with the code, but I am totally clueless as to why it is unable to serialize the fields.
Thanks very much in advance
As I see from the MongoRecordWriter source code, it does not support an arbitrary WritableComparable object as the key. You can use one of these classes as the key: BSONWritable, BSONObject, Text, UTF8, or simple wrappers like IntWritable. I also think you can use a Serializable object as the key. So I can suggest two workarounds:
Make your MongoKey serializable (implement Serializable and provide writeObject/readObject methods).
Use one of the supported classes as the key; for example, you can use Text: Text key = new Text(name.toString() + "\t" + place.toString());
This:
java.lang.Exception: java.lang.IllegalArgumentException: can't serialize class com.name.custom.MongoKey
exception is raised because MongoKey doesn't implement java.io.Serializable.
Add Serializable to your class declaration.
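A hedged sketch of what that change might look like. Text itself is not Serializable, so the fields are marked transient and the Java-serialization hooks are written by hand; treat this as illustrative rather than a drop-in fix, since whether MongoRecordWriter accepts it depends on the connector version.
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;

public class MongoKey implements WritableComparable<MongoKey>, Serializable {
    private transient Text name = new Text();
    private transient Text place = new Text();

    // Java serialization hooks (the path the answers above say MongoRecordWriter can use).
    private void writeObject(ObjectOutputStream out) throws IOException {
        out.writeUTF(name.toString());
        out.writeUTF(place.toString());
    }
    private void readObject(ObjectInputStream in) throws IOException {
        name = new Text(in.readUTF());
        place = new Text(in.readUTF());
    }

    // Writable and comparison methods stay exactly as in the question.
    public void readFields(DataInput in) throws IOException {
        name.readFields(in);
        place.readFields(in);
    }
    public void write(DataOutput out) throws IOException {
        name.write(out);
        place.write(out);
    }
    public int compareTo(MongoKey other) {
        int cmp = name.compareTo(other.name);
        return cmp != 0 ? cmp : place.compareTo(other.place);
    }
}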
Can somebody give a good example link for MapReduce with HBase? My requirement is to run MapReduce on an HDFS file and store the reducer output in an HBase table. The mapper input will be an HDFS file and its output will be Text/IntWritable key-value pairs. The reducer's output will be a Put object, i.e., it should sum the reducer's Iterable of IntWritable values and store the result in an HBase table.
Here is the code which will solve your problem
Driver
HBaseConfiguration conf = HBaseConfiguration.create();
Job job = new Job(conf,"JOB_NAME");
job.setJarByClass(yourclass.class);
job.setMapperClass(yourMapper.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(IntWritable.class);
FileInputFormat.setInputPaths(job, new Path(inputPath));
TableMapReduceUtil.initTableReducerJob(TABLE,
yourReducer.class, job);
job.setReducerClass(yourReducer.class);
job.waitForCompletion(true);
Mapper&Reducer
class yourMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
// @Override map() here
}
class yourReducer extends TableReducer<Text, IntWritable, ImmutableBytesWritable> {
// @Override reduce() here
}
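For concreteness, a hedged sketch of what that reducer body might look like; the column family "cf" and qualifier "count", and the summing of the IntWritable values, are assumptions based on the question rather than part of the original answer (Put.add is the pre-HBase-2.0 call; newer releases use addColumn).
import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;

public class yourReducer extends TableReducer<Text, IntWritable, ImmutableBytesWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable value : values) {
            sum += value.get(); // aggregate the counts emitted by the mapper
        }
        byte[] rowKey = Bytes.toBytes(key.toString());
        Put put = new Put(rowKey);
        // "cf" / "count" are placeholder column names; adjust them to your table schema.
        put.add(Bytes.toBytes("cf"), Bytes.toBytes("count"), Bytes.toBytes(sum));
        context.write(new ImmutableBytesWritable(rowKey), put);
    }
}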
Check the code below, which works fine for me with Phoenix HBase and MapReduce.
This program reads data from an HBase table and inserts the result into another table after the map-reduce job.
Tables: STOCK, STOCK_STATS
StockComputationJob.java
public static class StockMapper extends Mapper<NullWritable, StockWritable, Text , DoubleWritable> {
private Text stock = new Text();
private DoubleWritable price = new DoubleWritable ();
@Override
protected void map(NullWritable key, StockWritable stockWritable, Context context) throws IOException, InterruptedException {
double[] recordings = stockWritable.getRecordings();
final String stockName = stockWritable.getStockName();
System.out.println("Map-"+recordings);
double maxPrice = Double.MIN_VALUE;
for(double recording : recordings) {
System.out.println("M-"+key+"-"+recording);
if(maxPrice < recording) {
maxPrice = recording;
}
}
System.out.println(stockName+"--"+maxPrice);
stock.set(stockName);
price.set(maxPrice);
context.write(stock,price);
}
}
public static void main(String[] args) throws Exception {
final Configuration conf = new Configuration();
HBaseConfiguration.addHbaseResources(conf);
conf.set(HConstants.ZOOKEEPER_QUORUM, zkUrl);
final Job job = Job.getInstance(conf, "stock-stats-job");
// We can either specify a selectQuery or ignore it when we would like to retrieve all the columns
final String selectQuery = "SELECT STOCK_NAME,RECORDING_YEAR,RECORDINGS_QUARTER FROM STOCK ";
// StockWritable is the DBWritable class that enables us to process the Result of the above query
PhoenixMapReduceUtil.setInput(job,StockWritable.class,"STOCK",selectQuery);
// Set the target Phoenix table and the columns
PhoenixMapReduceUtil.setOutput(job, "STOCK_STATS", "STOCK_NAME,MAX_RECORDING");
job.setMapperClass(StockMapper.class);
job.setReducerClass(StockReducer.class);
job.setOutputFormatClass(PhoenixOutputFormat.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(DoubleWritable.class);
job.setOutputKeyClass(NullWritable.class);
job.setOutputValueClass(StockWritable.class);
TableMapReduceUtil.addDependencyJars(job);
job.waitForCompletion(true);
}
}
StockReducer.java
public class StockReducer extends Reducer<Text, DoubleWritable, NullWritable , StockWritable> {
protected void reduce(Text key, Iterable<DoubleWritable> recordings, Context context) throws IOException, InterruptedException {
double maxPrice = Double.MIN_VALUE;
System.out.println(recordings);
for(DoubleWritable recording : recordings) {
System.out.println("R-"+key+"-"+recording);
if(maxPrice < recording.get()) {
maxPrice = recording.get();
}
}
final StockWritable stock = new StockWritable();
stock.setStockName(key.toString());
stock.setMaxPrice(maxPrice);
System.out.println(key+"--"+maxPrice);
context.write(NullWritable.get(),stock);
}
}
StockWritable.java
public class StockWritable implements DBWritable,Writable {
private String stockName;
private int year;
private double[] recordings;
private double maxPrice;
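// The Writable (DataInput/DataOutput) methods below are left empty: this job never shuffles StockWritable, so only the JDBC-style readFields/write are exercised by the Phoenix formats.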
public void readFields(DataInput input) throws IOException {
}
public void write(DataOutput output) throws IOException {
}
public void readFields(ResultSet rs) throws SQLException {
stockName = rs.getString("STOCK_NAME");
setYear(rs.getInt("RECORDING_YEAR"));
final Array recordingsArray = rs.getArray("RECORDINGS_QUARTER");
setRecordings((double[])recordingsArray.getArray());
}
public void write(PreparedStatement pstmt) throws SQLException {
pstmt.setString(1, stockName);
pstmt.setDouble(2, maxPrice);
}
public int getYear() {
return year;
}
public void setYear(int year) {
this.year = year;
}
public double[] getRecordings() {
return recordings;
}
public void setRecordings(double[] recordings) {
this.recordings = recordings;
}
public double getMaxPrice() {
return maxPrice;
}
public void setMaxPrice(double maxPrice) {
this.maxPrice = maxPrice;
}
public String getStockName() {
return stockName;
}
public void setStockName(String stockName) {
this.stockName = stockName;
}
}
I have this Hadoop MapReduce code that works on graph data (in adjacency-list form) and is similar in spirit to an in-adjacency-list to out-adjacency-list transformation. The main MapReduce task code is the following:
public class TestTask extends Configured
implements Tool {
public static class TTMapper extends MapReduceBase
implements Mapper<Text, TextArrayWritable, Text, NeighborWritable> {
@Override
public void map(Text key,
TextArrayWritable value,
OutputCollector<Text, NeighborWritable> output,
Reporter reporter) throws IOException {
int numNeighbors = value.get().length;
double weight = (double)1 / numNeighbors;
Text[] neighbors = (Text[]) value.toArray();
NeighborWritable me = new NeighborWritable(key, new DoubleWritable(weight));
for (int i = 0; i < neighbors.length; i++) {
output.collect(neighbors[i], me);
}
}
}
public static class TTReducer extends MapReduceBase
implements Reducer<Text, NeighborWritable, Text, Text> {
@Override
public void reduce(Text key,
Iterator<NeighborWritable> values,
OutputCollector<Text, Text> output,
Reporter arg3)
throws IOException {
ArrayList<NeighborWritable> neighborList = new ArrayList<NeighborWritable>();
while(values.hasNext()) {
neighborList.add(values.next());
}
NeighborArrayWritable neighbors = new NeighborArrayWritable
(neighborList.toArray(new NeighborWritable[0]));
Text out = new Text(neighbors.toString());
output.collect(key, out);
}
}
@Override
public int run(String[] arg0) throws Exception {
JobConf conf = Util.getMapRedJobConf("testJob",
SequenceFileInputFormat.class,
TTMapper.class,
Text.class,
NeighborWritable.class,
1,
TTReducer.class,
Text.class,
Text.class,
TextOutputFormat.class,
"test/in",
"test/out");
JobClient.runJob(conf);
return 0;
}
public static void main(String[] args) throws Exception {
int res = ToolRunner.run(new TestTask(), args);
System.exit(res);
}
}
The auxiliary code is as follows:
TextArrayWritable:
public class TextArrayWritable extends ArrayWritable {
public TextArrayWritable() {
super(Text.class);
}
public TextArrayWritable(Text[] values) {
super(Text.class, values);
}
}
NeighborWritable:
public class NeighborWritable implements Writable {
private Text nodeId;
private DoubleWritable weight;
public NeighborWritable(Text nodeId, DoubleWritable weight) {
this.nodeId = nodeId;
this.weight = weight;
}
public NeighborWritable () { }
public Text getNodeId() {
return nodeId;
}
public DoubleWritable getWeight() {
return weight;
}
public void setNodeId(Text nodeId) {
this.nodeId = nodeId;
}
public void setWeight(DoubleWritable weight) {
this.weight = weight;
}
@Override
public void readFields(DataInput in) throws IOException {
nodeId = new Text();
nodeId.readFields(in);
weight = new DoubleWritable();
weight.readFields(in);
}
@Override
public void write(DataOutput out) throws IOException {
nodeId.write(out);
weight.write(out);
}
public String toString() {
return "NW[nodeId=" + (nodeId != null ? nodeId.toString() : "(null)") +
",weight=" + (weight != null ? weight.toString() : "(null)") + "]";
}
public boolean equals(Object o) {
if (!(o instanceof NeighborWritable)) {
return false;
}
NeighborWritable that = (NeighborWritable)o;
return (nodeId.equals(that.getNodeId()) && (weight.equals(that.getWeight())));
}
}
and the Util class:
public class Util {
public static JobConf getMapRedJobConf(String jobName,
Class<? extends InputFormat> inputFormatClass,
Class<? extends Mapper> mapperClass,
Class<?> mapOutputKeyClass,
Class<?> mapOutputValueClass,
int numReducer,
Class<? extends Reducer> reducerClass,
Class<?> outputKeyClass,
Class<?> outputValueClass,
Class<? extends OutputFormat> outputFormatClass,
String inputDir,
String outputDir) throws IOException {
JobConf conf = new JobConf();
if (jobName != null)
conf.setJobName(jobName);
conf.setInputFormat(inputFormatClass);
conf.setMapperClass(mapperClass);
if (numReducer == 0) {
conf.setNumReduceTasks(0);
conf.setOutputKeyClass(outputKeyClass);
conf.setOutputValueClass(outputValueClass);
conf.setOutputFormat(outputFormatClass);
} else {
// may set actual number of reducers
// conf.setNumReduceTasks(numReducer);
conf.setMapOutputKeyClass(mapOutputKeyClass);
conf.setMapOutputValueClass(mapOutputValueClass);
conf.setReducerClass(reducerClass);
conf.setOutputKeyClass(outputKeyClass);
conf.setOutputValueClass(outputValueClass);
conf.setOutputFormat(outputFormatClass);
}
// delete the existing target output folder
FileSystem fs = FileSystem.get(conf);
fs.delete(new Path(outputDir), true);
// specify input and output DIRECTORIES (not files)
FileInputFormat.addInputPath(conf, new Path(inputDir));
FileOutputFormat.setOutputPath(conf, new Path(outputDir));
return conf;
}
}
My input is the following graph (stored in binary format; I give the text form here):
1 2
2 1,3,5
3 2,4
4 3,5
5 2,4
According to the logic of the code the output should be:
1 NWArray[size=1,{NW[nodeId=2,weight=0.3333333333333333],}]
2 NWArray[size=3,{NW[nodeId=5,weight=0.5],NW[nodeId=3,weight=0.5],NW[nodeId=1,weight=1.0],}]
3 NWArray[size=2,{NW[nodeId=2,weight=0.3333333333333333],NW[nodeId=4,weight=0.5],}]
4 NWArray[size=2,{NW[nodeId=5,weight=0.5],NW[nodeId=3,weight=0.5],}]
5 NWArray[size=2,{NW[nodeId=2,weight=0.3333333333333333],NW[nodeId=4,weight=0.5],}]
But the output is coming as:
1 NWArray[size=1,{NW[nodeId=2,weight=0.3333333333333333],}]
2 NWArray[size=3,{NW[nodeId=5,weight=0.5],NW[nodeId=5,weight=0.5],NW[nodeId=5,weight=0.5],}]
3 NWArray[size=2,{NW[nodeId=2,weight=0.3333333333333333],NW[nodeId=2,weight=0.3333333333333333],}]
4 NWArray[size=2,{NW[nodeId=5,weight=0.5],NW[nodeId=5,weight=0.5],}]
5 NWArray[size=2,{NW[nodeId=2,weight=0.3333333333333333],NW[nodeId=2,weight=0.3333333333333333],}]
I cannot understand why the expected output is not coming out. Any help will be appreciated.
Thanks.
You're falling foul of object re-use:
while(values.hasNext()) {
neighborList.add(values.next());
}
values.next() will return the same object reference, but the underlying contents of that object will change on each iteration (the readFields method is called to re-populate the contents).
I suggest you amend it to the following (you'll need to obtain the Configuration conf variable from a setup method, unless you can obtain it from the Reporter or OutputCollector; sorry, I don't use the old API):
while (values.hasNext()) {
neighborList.add(ReflectionUtils.copy(conf, values.next(), new NeighborWritable()));
}
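A hedged variation on the same idea: WritableUtils.clone performs the same deep copy and creates the new instance for you (it still needs a Configuration, so the same caveat about obtaining conf applies):
// requires: import org.apache.hadoop.io.WritableUtils;
while (values.hasNext()) {
    // clone() serializes the current record and reads it back into a fresh NeighborWritable
    neighborList.add(WritableUtils.clone(values.next(), conf));
}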
But then I still can't understand why my unit test passed. Here is the code:
public class UWLTInitReducerTest {
private Text key;
private Iterator<NeighborWritable> values;
private NeighborArrayWritable nodeData;
private TTReducer reducer;
/**
* Set up the states for calling the map function
*/
@Before
public void setUp() throws Exception {
key = new Text("1001");
NeighborWritable[] neighbors = new NeighborWritable[4];
for (int i = 0; i < 4; i++) {
neighbors[i] = new NeighborWritable(new Text("300" + i), new DoubleWritable((double) 1 / (1 + i)));
}
values = Arrays.asList(neighbors).iterator();
nodeData = new NeighborArrayWritable(neighbors);
reducer = new TTReducer();
}
/**
* Test method for InitModelMapper#map - valid input
*/
@Test
public void testMapValid() {
// mock the output object
OutputCollector<Text, UWLTNodeData> output = mock(OutputCollector.class);
try {
// call the API
reducer.reduce(key, values, output, null);
// in order (sequential) verification of the calls to output.collect()
verify(output).collect(key, nodeData);
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
}
Why didn't this code catch the bug?
I have a situation in which the mapper emits an object of a custom type as its key.
It has two fields: an IntWritable ID and an IntArrayWritable data array.
The implementation is as follows.
import java.io.*;
import org.apache.hadoop.io.*;
public class PairDocIdPerm implements WritableComparable<PairDocIdPerm> {
public PairDocIdPerm(){
this.permId = new IntWritable(-1);
this.SignaturePerm = new IntArrayWritable();
}
public IntWritable getPermId() {
return permId;
}
public void setPermId(IntWritable permId) {
this.permId = permId;
}
public IntArrayWritable getSignaturePerm() {
return SignaturePerm;
}
public void setSignaturePerm(IntArrayWritable signaturePerm) {
SignaturePerm = signaturePerm;
}
private IntWritable permId;
private IntArrayWritable SignaturePerm;
public PairDocIdPerm(IntWritable permId,IntArrayWritable SignaturePerm) {
this.permId = permId;
this.SignaturePerm = SignaturePerm;
}
@Override
public void write(DataOutput out) throws IOException {
permId.write(out);
SignaturePerm.write(out);
}
@Override
public void readFields(DataInput in) throws IOException {
permId.readFields(in);
SignaturePerm.readFields(in);
}
@Override
public int hashCode() { // same permId must go to the same reducer, therefore hash just permId
return permId.get();//.hashCode();
}
@Override
public boolean equals(Object o) {
if (o instanceof PairDocIdPerm) {
PairDocIdPerm tp = (PairDocIdPerm) o;
return permId.equals(tp.permId) && SignaturePerm.equals(tp.SignaturePerm);
}
return false;
}
@Override
public String toString() {
return permId + "\t" +SignaturePerm.toString();
}
@Override
public int compareTo(PairDocIdPerm tp) {
int cmp = permId.compareTo(tp.permId);
Writable[] ar, other;
ar = this.SignaturePerm.get();
other = tp.SignaturePerm.get();
if (cmp == 0) {
for(int i=0;i<ar.length;i++){
if(((IntWritable)ar[i]).get() == ((IntWritable)other[i]).get()){cmp= 0;continue;}
else if(((IntWritable)ar[i]).get() < ((IntWritable)other[i]).get()){ return -1;}
else if(((IntWritable)ar[i]).get() > ((IntWritable)other[i]).get()){return 1;}
}
}
return cmp;
//return 1;
}
}
I require keys with the same ID to go to the same reducer, with their sort order as coded in the compareTo method.
However, when I use this, my job execution status is always map 100%, reduce 0%.
The reduce never runs to completion. Is there anything wrong with this implementation?
In general, what is the likely problem if the reducer status is always 0%?
I think this might be a possible null pointer exception in the read method:
@Override
public void readFields(DataInput in) throws IOException {
permId.readFields(in);
SignaturePerm.readFields(in);
}
permId is null in this case.
So what you have to do is this:
IntWritable permId = new IntWritable();
Either in the field initializer or before the read.
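For illustration only, a defensive version of readFields along those lines might look like this sketch, which guarantees both fields are non-null before delegating to them:
@Override
public void readFields(DataInput in) throws IOException {
    // Lazily initialize the fields so deserialization never dereferences null
    if (permId == null) {
        permId = new IntWritable();
    }
    if (SignaturePerm == null) {
        SignaturePerm = new IntArrayWritable();
    }
    permId.readFields(in);
    SignaturePerm.readFields(in);
}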
However, your code is horrible to read.