Avro AvroMultipleOutputs part-r-00000: File is not open for writing Exception - hadoop

I have written a MapReduce job with Avro 1.7.4 on Hadoop 2.3.0. In a first step I wrote all the Avro results into an AvroSequenceFile, and everything worked without problems.
Then I tried to use the AvroMultipleOutputs class to write the results into different files. I wrote the same MapReduce job without Avro and had no problem writing the data into two separate files (by the way, part-r-00000 was created in HDFS but left empty).
The Avro variant produces exceptions as soon as I write data in the reducer. (If I comment out the lines that write data, I get no exceptions.)
Here is the job configuration:
this.conf = new Configuration();
Job job = Job.getInstance(conf);
job.setJarByClass(ArchiveDataProcessorMR.class);
Path inPath = new Path(props.getProperty("inPath").trim());
Path outPath = new Path(props.getProperty("outPath").trim());
Path outPathMeta = new Path(props.getProperty("outPath.meta").trim());
Path outPathPayload = new Path(props.getProperty("outPath.payload").trim());
// cleanup resources from previous run
FileSystem fs = FileSystem.get(conf);
fs.delete(outPath, true);
FileInputFormat.setInputPaths(job, inPath);
FileOutputFormat.setOutputPath(job, outPath);
AvroSequenceFileInputFormat<DataExportKey,DataExportValue> sequenceInputFormat = new AvroSequenceFileInputFormat<DataExportKey,DataExportValue>();
job.setInputFormatClass(sequenceInputFormat.getClass());
AvroJob.setInputValueSchema(job, DataExportValue.getClassSchema());
AvroJob.setInputKeySchema(job, DataExportKey.getClassSchema());
AvroJob.setMapOutputValueSchema(job, DataExportValue.getClassSchema());
AvroJob.setMapOutputKeySchema(job, DataExportKey.getClassSchema());
job.setMapperClass(ArchiveDataMapper.class);
job.setReducerClass(ArchiveDataReducer.class);
AvroSequenceFileOutputFormat<DataExportKey,DataExportValue> sequenceOutputFormat = new AvroSequenceFileOutputFormat<DataExportKey,DataExportValue>();
AvroJob.setOutputKeySchema(job, DataExportKey.getClassSchema());
AvroJob.setOutputValueSchema(job, DataExportValue.getClassSchema());
job.setOutputFormatClass(AvroSequenceFileOutputFormat.class);
AvroMultipleOutputs.addNamedOutput(job, "meta", sequenceOutputFormat.getClass(), DataExportKey.getClassSchema(), DataExportValue.getClassSchema());
AvroMultipleOutputs.addNamedOutput(job, "payload", sequenceOutputFormat.getClass(), DataExportKey.getClassSchema(), DataExportValue.getClassSchema());
The reducer code (without the business logic) looks like this:
public static class ArchiveDataReducer extends Reducer<AvroKey<DataExportKey>, AvroValue<DataExportValue>, AvroKey<DataExportKey>, AvroValue<DataExportValue>> {

    private AvroMultipleOutputs amos;

    public void setup(Context context) throws IOException, InterruptedException {
        this.amos = new AvroMultipleOutputs(context);
    }

    public void cleanup(Context context) throws IOException, InterruptedException {
        this.amos.close();
    }

    /**
     * @param key
     */
    public void reduce(AvroKey<DataExportKey> key, Iterable<AvroValue<DataExportValue>> xmlIter, Context context) throws IOException, InterruptedException {
        try {
            DataExportValue newValue = new DataExportValue();
            if (key.datum()......) {
                ... snip...
                amos.write("meta", key, new AvroValue<DataExportValue>(newValue));
            } else {
                ... snip...
                amos.write("payload", key, new AvroValue<DataExportValue>(newValue));
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
} // class ArchiveDataReducer
The exception message is:
14/05/04 06:52:58 INFO mapreduce.Job: map 100% reduce 0%
14/05/04 06:53:09 INFO mapreduce.Job: map 100% reduce 91%
14/05/04 06:53:09 INFO mapreduce.Job: Task Id : attempt_1399104292130_0016_r_000000_1, Status : FAILED
Error: org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException): No lease on /applications/wd/data_avro/_temporary/1/_temporary/attempt_1399104292130_0016_r_000000_1/part-r-00000: File is not open for writing. Holder DFSClient_attempt_1399104292130_0016_r_000000_1_338983539_1 does not have any open files.
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:2856)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.analyzeFileState(FSNamesystem.java:2667)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2573)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:563)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:407)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:585)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:928)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1962)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1958)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1548)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1956)
at org.apache.hadoop.ipc.Client.call(Client.java:1406)
at org.apache.hadoop.ipc.Client.call(Client.java:1359)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:206)
at com.sun.proxy.$Proxy10.addBlock(Unknown Source)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:186)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
at com.sun.proxy.$Proxy10.addBlock(Unknown Source)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:348)
at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.locateFollowingBlock(DFSOutputStream.java:1264)
at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.nextBlockOutputStream(DFSOutputStream.java:1112)
at org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.run(DFSOutputStream.java:522)
Any hints on how to solve this problem? Does anyone have another example with AvroMultipleOutputs up and running? I would like to see your code.
Kind regards
Martin
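For reference, the bare-bones pattern I am comparing against looks like the sketch below. It is only a sketch, not a verified fix for the exception above: the LazyOutputFormat call, the explicit baseOutputPath arguments ("meta/part", "payload/part"), and the four-argument write overload are additions of mine, assuming they are available in Avro 1.7.4; the key/value classes are the ones from the job above.
// Driver (sketch): register the named outputs and, optionally, suppress the empty
// default part-r-00000 via LazyOutputFormat (untested addition).
LazyOutputFormat.setOutputFormatClass(job, AvroSequenceFileOutputFormat.class);
AvroMultipleOutputs.addNamedOutput(job, "meta", AvroSequenceFileOutputFormat.class,
        DataExportKey.getClassSchema(), DataExportValue.getClassSchema());
AvroMultipleOutputs.addNamedOutput(job, "payload", AvroSequenceFileOutputFormat.class,
        DataExportKey.getClassSchema(), DataExportValue.getClassSchema());

// Reducer (sketch): same lifecycle as above, but each named output gets its own
// illustrative base path, assuming the four-argument write overload exists.
amos = new AvroMultipleOutputs(context);                  // in setup()
amos.write("meta", key, new AvroValue<DataExportValue>(newValue), "meta/part");
amos.write("payload", key, new AvroValue<DataExportValue>(newValue), "payload/part");
amos.close();                                             // in cleanup()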

Related

How to fix this issue that I'm getting while executing map-reduce java code in Eclipse in Windows 10?

I am trying to execute a Java program to count the words in an input file using MapReduce in Hadoop. I am using Windows 10 and the Eclipse IDE.
I am getting a FileNotFoundException when the reducer starts executing; the mapper completes fine. Please help me resolve the issue. I have been stuck here for quite a while.
public class CountMax {

    public static class Map extends Mapper<LongWritable, Text, Text, IntWritable> {
        public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                value.set(tokenizer.nextToken());
                context.write(value, new IntWritable(1));
            }
            System.out.println("In mapper");
        }
    }

    public static class Reduce extends Reducer<Text, IntWritable, Text, IntWritable> {
        public int maxCount = 0;
        public String maxCountWord = "";

        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable x : values)
                sum += x.get();
            if (sum > maxCount) {
                maxCountWord = key.toString();
                maxCount = sum;
                System.out.println(sum);
                System.out.println(key);
            }
            System.out.println(maxCountWord);
        }

        public void setup(Context context) throws IOException, InterruptedException {
            System.out.println("in SETUP");
        }

        protected void cleanup(Context context) throws IOException, InterruptedException {
            context.write(new Text(maxCountWord), new IntWritable(maxCount));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "word count");
        job.setJarByClass(CountMax.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        Path inp = new Path("C:/input.txt");
        Path out = new Path("C:/output");
        FileInputFormat.addInputPath(job, inp);
        FileOutputFormat.setOutputPath(job, out);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
This is what I'm getting in the console:
2019-10-07 00:07:39,461 INFO mapred.LocalJobRunner (LocalJobRunner.java:run(249)) - Finishing task: attempt_local2096667908_0001_m_000000_0
2019-10-07 00:07:39,461 INFO mapred.LocalJobRunner (LocalJobRunner.java:runTasks(456)) - map task executor complete.
2019-10-07 00:07:39,463 INFO mapred.LocalJobRunner (LocalJobRunner.java:runTasks(448)) - Waiting for reduce tasks
2019-10-07 00:07:39,463 INFO mapred.LocalJobRunner (LocalJobRunner.java:run(302)) - Starting task: attempt_local2096667908_0001_r_000000_0
2019-10-07 00:07:39,472 INFO output.FileOutputCommitter (FileOutputCommitter.java:<init>(108)) - File Output Committer Algorithm version is 1
2019-10-07 00:07:39,472 INFO util.ProcfsBasedProcessTree (ProcfsBasedProcessTree.java:isAvailable(192)) - ProcfsBasedProcessTree currently is supported only on Linux.
2019-10-07 00:07:39,499 INFO mapred.Task (Task.java:initialize(614)) - Using ResourceCalculatorProcessTree : org.apache.hadoop.yarn.util.WindowsBasedProcessTree@7734a2ef
2019-10-07 00:07:39,501 INFO mapred.ReduceTask (ReduceTask.java:run(362)) - Using ShuffleConsumerPlugin: org.apache.hadoop.mapreduce.task.reduce.Shuffle@18daa432
2019-10-07 00:07:39,509 INFO reduce.MergeManagerImpl (MergeManagerImpl.java:<init>(205)) - MergerManager: memoryLimit=1314232704, maxSingleShuffleLimit=328558176, mergeThreshold=867393600, ioSortFactor=10, memToMemMergeOutputsThreshold=10
2019-10-07 00:07:39,510 INFO reduce.EventFetcher (EventFetcher.java:run(61)) - attempt_local2096667908_0001_r_000000_0 Thread started: EventFetcher for fetching Map Completion Events
2019-10-07 00:07:39,528 INFO mapred.LocalJobRunner (LocalJobRunner.java:runTasks(456)) - reduce task executor complete.
2019-10-07 00:07:39,532 WARN mapred.LocalJobRunner (LocalJobRunner.java:run(560)) - job_local2096667908_0001
java.lang.Exception: org.apache.hadoop.mapreduce.task.reduce.Shuffle$ShuffleError: error in shuffle in localfetcher#1
at org.apache.hadoop.mapred.LocalJobRunner$Job.runTasks(LocalJobRunner.java:462)
at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:529)
Caused by: org.apache.hadoop.mapreduce.task.reduce.Shuffle$ShuffleError: error in shuffle in localfetcher#1
at org.apache.hadoop.mapreduce.task.reduce.Shuffle.run(Shuffle.java:134)
at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:376)
at org.apache.hadoop.mapred.LocalJobRunner$Job$ReduceTaskRunnable.run(LocalJobRunner.java:319)
at java.util.concurrent.Executors$RunnableAdapter.call(Unknown Source)
at java.util.concurrent.FutureTask.run(Unknown Source)
at java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source)
at java.lang.Thread.run(Unknown Source)
Caused by: java.io.FileNotFoundException: C:/tmp/hadoop-SahilJ%20PC/mapred/local/localRunner/SahilJ%20PC/jobcache/job_local2096667908_0001/attempt_local2096667908_0001_m_000000_0/output/file.out.index
at org.apache.hadoop.fs.RawLocalFileSystem.open(RawLocalFileSystem.java:200)
at org.apache.hadoop.fs.FileSystem.open(FileSystem.java:768)
at org.apache.hadoop.io.SecureIOUtils.openFSDataInputStream(SecureIOUtils.java:155)
at org.apache.hadoop.mapred.SpillRecord.<init>(SpillRecord.java:71)
at org.apache.hadoop.mapred.SpillRecord.<init>(SpillRecord.java:62)
at org.apache.hadoop.mapred.SpillRecord.<init>(SpillRecord.java:57)
at org.apache.hadoop.mapreduce.task.reduce.LocalFetcher.copyMapOutput(LocalFetcher.java:124)
at org.apache.hadoop.mapreduce.task.reduce.LocalFetcher.doCopy(LocalFetcher.java:102)
at org.apache.hadoop.mapreduce.task.reduce.LocalFetcher.run(LocalFetcher.java:85)
Creating a new user account without any space in the name, giving it all the admin rights, and doing the whole setup again under that account solved the issue.
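If creating a new account is not an option, another thing sometimes suggested (untested here, and only a sketch) is pointing Hadoop's local working directory at a path without spaces, since the failing path contains the URL-encoded space (%20) from the user name. The "C:/hadooptmp" path below is illustrative, not taken from the question.
// Hypothetical workaround sketch: use a space-free base directory for local job files.
Configuration conf = new Configuration();
conf.set("hadoop.tmp.dir", "C:/hadooptmp");   // real property; default is /tmp/hadoop-${user.name}
Job job = Job.getInstance(conf, "word count");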

Reading ORC File using MapReduce

I'm trying to read ORC files compressed with SNAPPY via MapReduce. My intent is just to leverage an identity mapper, essentially to merge small files. However, I get a NullPointerException when doing so. I can see from the log that the schema is being inferred; do I still need to set the schema for the output file from the mapper?
public class Test {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job job = new Job(conf, "test");
        job.setJarByClass(Test.class);
        job.setMapperClass(Mapper.class);
        conf.set("orc.compress", "SNAPPY");
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Writable.class);
        job.setInputFormatClass(OrcInputFormat.class);
        job.setOutputFormatClass(OrcOutputFormat.class);
        job.setNumReduceTasks(0);
        String source = args[0];
        String target = args[1];
        FileInputFormat.setInputPaths(job, new Path(source));
        FileOutputFormat.setOutputPath(job, new Path(target));
        boolean result = job.waitForCompletion(true);
        System.exit(result ? 0 : 1);
    }
}
Error: java.lang.NullPointerException
at org.apache.orc.impl.WriterImpl.<init>(WriterImpl.java:178)
at org.apache.orc.OrcFile.createWriter(OrcFile.java:559)
at org.apache.orc.mapreduce.OrcOutputFormat.getRecordWriter(OrcOutputFormat.java:55)
at org.apache.hadoop.mapred.MapTask$NewDirectOutputCollector.<init>(MapTask.java:644)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:764)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341)
at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:168)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1642)
at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:163)
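Regarding the schema question: the NullPointerException is raised while the ORC writer is being constructed, which is consistent with the output schema never reaching the writer. Below is a sketch of setting it explicitly; the struct layout is illustrative only, and note that conf.set(...) calls made after new Job(conf, ...) may not reach the job, because Job copies the configuration.
// Hypothetical sketch: declare the output schema on the job's own configuration.
// "orc.mapred.output.schema" is the key behind OrcConf.MAPRED_OUTPUT_SCHEMA; the
// struct below is an example, not the real schema of the input files.
Job job = Job.getInstance(new Configuration(), "test");
job.getConfiguration().set("orc.compress", "SNAPPY");
job.getConfiguration().set("orc.mapred.output.schema", "struct<id:bigint,name:string>");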

Hadoop MultipleOutputs in Reducer with FileAlreadyExistsException

I use MultipleOutputs in a reducer. The multiple output writes files to a folder called NewIdentities. The code is shown below:
private MultipleOutputs<Text,Text> mos;

@Override
public void reduce(Text inputKey, Iterable<Text> values, Context context) throws IOException, InterruptedException {
    ......
    // output to change report
    if (ischangereport.equals("TRUE")) {
        mos.write(new Text(e.getHID()), new Text(changereport.deleteCharAt(changereport.length() - 1).toString()), "NewIdentities/");
    }
}

@Override
public void setup(Context context) {
    mos = new MultipleOutputs<Text,Text>(context);
}

@Override
protected void cleanup(Context context) throws IOException, InterruptedException {
    mos.close();
}
It ran fine previously, but when I ran it today it threw the exception below. My Hadoop version is 2.4.0.
Error: org.apache.hadoop.fs.FileAlreadyExistsException: /CaptureOnlyMatchIndex9/TEMP/ChangeReport/NewIdentities/-r-00000 for client 192.168.71.128 already exists
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInternal(FSNamesystem.java:2297)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInt(FSNamesystem.java:2225)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFile(FSNamesystem.java:2178)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.create(NameNodeRpcServer.java:520)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.create(ClientNamenodeProtocolServerSideTranslatorPB.java:354)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:585)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:928)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2013)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2009)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1548)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2007)
at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:106)
at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:73)
at org.apache.hadoop.hdfs.DFSOutputStream.newStreamForCreate(DFSOutputStream.java:1604)
at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1465)
at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1390)
at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:394)
at org.apache.hadoop.hdfs.DistributedFileSystem$6.doCall(DistributedFileSystem.java:390)
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:390)
at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:334)
at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:906)
at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:887)
at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:784)
at org.apache.hadoop.mapreduce.lib.output.TextOutputFormat.getRecordWriter(TextOutputFormat.java:132)
at org.apache.hadoop.mapreduce.lib.output.MultipleOutputs.getRecordWriter(MultipleOutputs.java:475)
at
I found the reason for it: one of my reducers ran out of memory, so it implicitly threw an out-of-memory exception. Hadoop stopped the current multiple output, and then another reducer attempt wanted to write output, so it created another multiple-output object and the collision happened.

Hadoop Exception in thread "main" java.io.FileNotFoundException: hadoop-mapreduce-client-core-2.6.0.jar even though that file exists

I am working with hadoop-2.6.0 and hbase-0.98.9. While running the Hadoop job, it throws a java.io.FileNotFoundException even though that file exists and is also on the classpath, but it still looks for it under an hdfs:// path. What could be the problem? I did check here, but that question is about third-party jars; here the jar is on the classpath. Here is the error.
15/05/23 02:08:39 INFO zookeeper.ZooKeeper: Initiating client connection, connectString=localhost:2181 sessionTimeout=90000 watcher=hconnection-0x6737ca, quorum=localhost:2181, baseZNode=/hbase
15/05/23 02:08:39 INFO zookeeper.ClientCnxn: Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error)
15/05/23 02:08:39 INFO zookeeper.ClientCnxn: Socket connection established to localhost/127.0.0.1:2181, initiating session
15/05/23 02:08:39 INFO zookeeper.ClientCnxn: Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x14d7fd352eb000c, negotiated timeout = 40000
15/05/23 02:08:40 INFO mapreduce.TableOutputFormat: Created table instance for Energy
15/05/23 02:08:40 WARN mapreduce.JobSubmitter: Hadoop command-line option parsing not performed. Implement the Tool interface and execute your application with ToolRunner to remedy this.
15/05/23 02:08:40 INFO mapreduce.JobSubmitter: Cleaning up the staging area file:/home/vijaykumar/hadoop/hadoop_tmpdir/mapred/staging/vijaykumar1706101359/.staging/job_local1706101359_0001
Exception in thread "main" java.io.FileNotFoundException: File does not exist: hdfs://localhost:54310/home/vijaykumar/hadoop/hadoop-2.6.0/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.6.0.jar
at org.apache.hadoop.hdfs.DistributedFileSystem$18.doCall(DistributedFileSystem.java:1122)
at org.apache.hadoop.hdfs.DistributedFileSystem$18.doCall(DistributedFileSystem.java:1114)
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
at org.apache.hadoop.hdfs.DistributedFileSystem.getFileStatus(DistributedFileSystem.java:1114)
at org.apache.hadoop.mapreduce.filecache.ClientDistributedCacheManager.getFileStatus(ClientDistributedCacheManager.java:288)
at org.apache.hadoop.mapreduce.filecache.ClientDistributedCacheManager.getFileStatus(ClientDistributedCacheManager.java:224)
at org.apache.hadoop.mapreduce.filecache.ClientDistributedCacheManager.determineTimestamps(ClientDistributedCacheManager.java:93)
at org.apache.hadoop.mapreduce.filecache.ClientDistributedCacheManager.determineTimestampsAndCacheVisibilities(ClientDistributedCacheManager.java:57)
at org.apache.hadoop.mapreduce.JobSubmitter.copyAndConfigureFiles(JobSubmitter.java:269)
at org.apache.hadoop.mapreduce.JobSubmitter.copyAndConfigureFiles(JobSubmitter.java:390)
at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:483)
at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1296)
at org.apache.hadoop.mapreduce.Job$10.run(Job.java:1293)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1628)
at org.apache.hadoop.mapreduce.Job.submit(Job.java:1293)
at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1314)
at habseWrite.run(habseWrite.java:142)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:70)
at habseWrite.main(habseWrite.java:107)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.util.RunJar.run(RunJar.java:221)
at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
Mapper class
public static class WriteMapper extends Mapper<LongWritable, Text, IntWritable, Text> {
    IntWritable k = new IntWritable();
    Text res = new Text();

    public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String val = value.toString();
        int row = val.hashCode();
        k.set(row);
        res.set(val);
        context.write(k, res);
    }
}
Reducer code
public static class WriteReducer extends TableReducer<IntWritable, Text, Text> {
    public static final byte[] area = "Area".getBytes();
    public static final byte[] prop = "Property".getBytes();
    private Text rowkey = new Text();
    private int rowCount = 0;

    public void reduce(IntWritable key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        String X1 = "", X2 = "", X3 = "", X4 = "", X5 = "",
               X6 = "", X7 = "", X8 = "", Y1 = "", Y2 = "";
        for (Text val : values) {
            String[] v = val.toString().split("\t");
            X1 = v[0];
            X2 = v[1];
            X3 = v[2];
            X4 = v[3];
            X5 = v[4];
            X6 = v[5];
            X7 = v[6];
            X8 = v[7];
            Y1 = v[8];
            Y2 = v[9];
        }
        String k = "row" + rowCount;
        Put put = new Put(Bytes.toBytes(k.toString()));
        put.add(area, "X1".getBytes(), Bytes.toBytes(X1));
        put.add(area, "X5".getBytes(), Bytes.toBytes(X5));
        put.add(area, "X6".getBytes(), Bytes.toBytes(X6));
        put.add(area, "Y1".getBytes(), Bytes.toBytes(Y1));
        put.add(area, "Y2".getBytes(), Bytes.toBytes(Y2));
        put.add(prop, "X2".getBytes(), Bytes.toBytes(X2));
        put.add(prop, "X3".getBytes(), Bytes.toBytes(X3));
        put.add(prop, "X4".getBytes(), Bytes.toBytes(X4));
        put.add(prop, "X7".getBytes(), Bytes.toBytes(X7));
        put.add(prop, "X8".getBytes(), Bytes.toBytes(X8));
        rowCount++;
        rowkey.set(k);
        context.write(rowkey, put);
    }
}
And the main method:
public static void main(String[] args) throws Exception {
    int res = ToolRunner.run(new Configuration(), new habseWrite(), args);
    System.exit(res);
}

public int run(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    String inputPath = args[0];
    Job job = new Job(conf, "HBase_write");
    job.setInputFormatClass(TextInputFormat.class);
    job.setJarByClass(habseWrite.class);
    job.setMapperClass(habseWrite.WriteMapper.class);
    job.setMapOutputKeyClass(IntWritable.class);
    job.setMapOutputValueClass(Text.class);
    TableMapReduceUtil.initTableReducerJob(
        "Energy",            // output table
        WriteReducer.class,  // reducer class
        job);
    job.setNumReduceTasks(1);
    FileInputFormat.setInputPaths(job, inputPath);
    return (job.waitForCompletion(true) ? 0 : 1);
}

Getting Hbase Exception No regions passed

Hi, I am new to HBase and I'm trying to learn how to bulk load data into an HBase table using MapReduce, but I am getting the exception below:
Exception in thread "main" java.lang.IllegalArgumentException: No regions passed
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.writePartitions(HFileOutputFormat2.java:307)
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.configurePartitioner(HFileOutputFormat2.java:527)
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.configureIncrementalLoad(HFileOutputFormat2.java:391)
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.configureIncrementalLoad(HFileOutputFormat2.java:356)
at JobDriver.run(JobDriver.java:108)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:70)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:84)
at JobDriver.main(JobDriver.java:34)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.util.RunJar.main(RunJar.java:212)
This is my mapper code:
public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
    System.out.println("Value in Mapper" + value.toString());
    String[] values = value.toString().split(",");
    byte[] row = Bytes.toBytes(values[0]);
    ImmutableBytesWritable k = new ImmutableBytesWritable(row);
    KeyValue kvProtocol = new KeyValue(row, "PROTOCOLID".getBytes(), "PROTOCOLID".getBytes(), values[1].getBytes());
    context.write(k, kvProtocol);
}
This is my job configuration:
public class JobDriver extends Configured implements Tool {

    public static void main(String[] args) throws Exception {
        // TODO Auto-generated method stub
        ToolRunner.run(new JobDriver(), args);
        System.exit(0);
    }

    @Override
    public int run(String[] arg0) throws Exception {
        // TODO Auto-generated method stub
        // HBase Configuration
        System.out.println("**********Starting Hbase*************");
        Configuration conf = HBaseConfiguration.create();
        Job job = new Job(conf, "TestHFileToHBase");
        job.setJarByClass(JobDriver.class);
        job.setOutputKeyClass(ImmutableBytesWritable.class);
        job.setOutputValueClass(KeyValue.class);
        job.setMapperClass(LoadMapper.class);
        job.setOutputFormatClass(HFileOutputFormat2.class);
        HTable table = new HTable(conf, "kiran");
        FileInputFormat.addInputPath(job, new Path("hdfs://192.168.61.62:9001/sampledata.csv"));
        FileOutputFormat.setOutputPath(job, new Path("hdfs://192.168.61.62:9001/deletions_6.csv"));
        HFileOutputFormat2.configureIncrementalLoad(job, table);
        //System.exit(job.waitForCompletion(true) ? 0 : 1);
        return job.waitForCompletion(true) ? 0 : 1;
    }
}
Can anyone please help me resolve this exception?
You have to create the table first. You can do it with the code below:
//Create table and do pre-split
HTableDescriptor descriptor = new HTableDescriptor(
Bytes.toBytes(tableName)
);
descriptor.addFamily(
new HColumnDescriptor(Constants.COLUMN_FAMILY_NAME)
);
HBaseAdmin admin = new HBaseAdmin(config);
byte[] startKey = new byte[16];
Arrays.fill(startKey, (byte) 0);
byte[] endKey = new byte[16];
Arrays.fill(endKey, (byte)255);
admin.createTable(descriptor, startKey, endKey, REGIONS_COUNT);
admin.close();
or directly from the hbase shell with the command:
create 'kiran', 'colfam1'
The exception is thrown because the start keys list is empty (see line 306).
More info can be found here.
Note that the table name must be the same as the one you use in your code (kiran).
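A quick way to confirm the empty start-keys condition before submitting the job, using the same HTable API as in the question (a diagnostic sketch only):
// Sketch: configureIncrementalLoad builds its partitioner from the table's region
// start keys, so a table with no regions produces the "No regions passed" error above.
HTable table = new HTable(conf, "kiran");
System.out.println("Start keys: " + table.getStartKeys().length);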
