As described in the installation guide, I copied the file phoenix-5.0.0-HBase-2.0-server.jar into the HBase library path /opt/hbase/lib/. HBase now crashes at startup. If I remove the jar, HBase starts and runs fine again. What is the problem?
My Hadoop version:
$ hadoop version
Hadoop 3.1.3
Source code repository https://gitbox.apache.org/repos/asf/hadoop.git -r ba631c436b806728f8ec2f54ab1e289526c90579
Compiled by ztang on 2019-09-12T02:47Z
Compiled with protoc 2.5.0
From source with checksum ec785077c385118ac91aadde5ec9799
This command was run using /opt/hadoop/share/hadoop/common/hadoop-common-3.1.3.jar
HBase version:
hbase(main):001:0> version
2.0.6, rd65cccb5fda039217954a558c65bda423e0d6df3, Wed Aug 14 15:44:48 UTC 2019
Took 0.0003 seconds
Error details in log file /opt/hbase/logs/hbase-master.log:
2020-06-06 14:54:48,454 ERROR [main] regionserver.HRegionServer: Failed construction RegionServer
java.lang.AbstractMethodError: org.apache.phoenix.trace.PhoenixMetricsSink.init(Lorg/apache/commons/configuration/SubsetConfiguration;)V
at org.apache.hadoop.metrics2.impl.MetricsConfig.getPlugin(MetricsConfig.java:199)
at org.apache.hadoop.metrics2.impl.MetricsSystemImpl.newSink(MetricsSystemImpl.java:529)
at org.apache.hadoop.metrics2.impl.MetricsSystemImpl.configureSinks(MetricsSystemImpl.java:501)
at org.apache.hadoop.metrics2.impl.MetricsSystemImpl.configure(MetricsSystemImpl.java:480)
at org.apache.hadoop.metrics2.impl.MetricsSystemImpl.start(MetricsSystemImpl.java:189)
at org.apache.hadoop.metrics2.impl.MetricsSystemImpl.init(MetricsSystemImpl.java:164)
at org.apache.hadoop.metrics2.lib.DefaultMetricsSystem.init(DefaultMetricsSystem.java:54)
at org.apache.hadoop.metrics2.lib.DefaultMetricsSystem.initialize(DefaultMetricsSystem.java:50)
at org.apache.hadoop.hbase.metrics.BaseSourceImpl$DefaultMetricsSystemInitializer.init(BaseSourceImpl.java:54)
at org.apache.hadoop.hbase.metrics.BaseSourceImpl.<init>(BaseSourceImpl.java:116)
at org.apache.hadoop.hbase.io.MetricsIOSourceImpl.<init>(MetricsIOSourceImpl.java:46)
at org.apache.hadoop.hbase.io.MetricsIOSourceImpl.<init>(MetricsIOSourceImpl.java:38)
at org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.createIO(MetricsRegionServerSourceFactoryImpl.java:84)
at org.apache.hadoop.hbase.io.MetricsIO.<init>(MetricsIO.java:35)
at org.apache.hadoop.hbase.io.hfile.HFile.<clinit>(HFile.java:195)
at org.apache.hadoop.hbase.regionserver.HRegionServer.<init>(HRegionServer.java:536)
at org.apache.hadoop.hbase.master.HMaster.<init>(HMaster.java:477)
at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
at org.apache.hadoop.hbase.master.HMaster.constructMaster(HMaster.java:3050)
at org.apache.hadoop.hbase.master.HMasterCommandLine.startMaster(HMasterCommandLine.java:236)
at org.apache.hadoop.hbase.master.HMasterCommandLine.run(HMasterCommandLine.java:140)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:70)
at org.apache.hadoop.hbase.util.ServerCommandLine.doMain(ServerCommandLine.java:149)
at org.apache.hadoop.hbase.master.HMaster.main(HMaster.java:3068)
2020-06-06 14:54:48,456 ERROR [main] master.HMasterCommandLine: Master exiting
java.lang.RuntimeException: Failed construction of Master: class org.apache.hadoop.hbase.master.HMaster.
at org.apache.hadoop.hbase.master.HMaster.constructMaster(HMaster.java:3057)
at org.apache.hadoop.hbase.master.HMasterCommandLine.startMaster(HMasterCommandLine.java:236)
at org.apache.hadoop.hbase.master.HMasterCommandLine.run(HMasterCommandLine.java:140)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:70)
at org.apache.hadoop.hbase.util.ServerCommandLine.doMain(ServerCommandLine.java:149)
at org.apache.hadoop.hbase.master.HMaster.main(HMaster.java:3068)
Caused by: java.lang.AbstractMethodError: org.apache.phoenix.trace.PhoenixMetricsSink.init(Lorg/apache/commons/configuration/SubsetConfiguration;)V
at org.apache.hadoop.metrics2.impl.MetricsConfig.getPlugin(MetricsConfig.java:199)
at org.apache.hadoop.metrics2.impl.MetricsSystemImpl.newSink(MetricsSystemImpl.java:529)
at org.apache.hadoop.metrics2.impl.MetricsSystemImpl.configureSinks(MetricsSystemImpl.java:501)
at org.apache.hadoop.metrics2.impl.MetricsSystemImpl.configure(MetricsSystemImpl.java:480)
at org.apache.hadoop.metrics2.impl.MetricsSystemImpl.start(MetricsSystemImpl.java:189)
at org.apache.hadoop.metrics2.impl.MetricsSystemImpl.init(MetricsSystemImpl.java:164)
at org.apache.hadoop.metrics2.lib.DefaultMetricsSystem.init(DefaultMetricsSystem.java:54)
at org.apache.hadoop.metrics2.lib.DefaultMetricsSystem.initialize(DefaultMetricsSystem.java:50)
at org.apache.hadoop.hbase.metrics.BaseSourceImpl$DefaultMetricsSystemInitializer.init(BaseSourceImpl.java:54)
at org.apache.hadoop.hbase.metrics.BaseSourceImpl.<init>(BaseSourceImpl.java:116)
at org.apache.hadoop.hbase.io.MetricsIOSourceImpl.<init>(MetricsIOSourceImpl.java:46)
at org.apache.hadoop.hbase.io.MetricsIOSourceImpl.<init>(MetricsIOSourceImpl.java:38)
at org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.createIO(MetricsRegionServerSourceFactoryImpl.java:84)
at org.apache.hadoop.hbase.io.MetricsIO.<init>(MetricsIO.java:35)
at org.apache.hadoop.hbase.io.hfile.HFile.<clinit>(HFile.java:195)
at org.apache.hadoop.hbase.regionserver.HRegionServer.<init>(HRegionServer.java:536)
at org.apache.hadoop.hbase.master.HMaster.<init>(HMaster.java:477)
at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
at org.apache.hadoop.hbase.master.HMaster.constructMaster(HMaster.java:3050)
... 5 more
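For context on this particular AbstractMethodError: it means the PhoenixMetricsSink class loaded at runtime does not implement init() with the exact SubsetConfiguration type that the metrics2 code on HBase's classpath invokes; Hadoop changed this parameter from commons-configuration to commons-configuration2, so a Phoenix server jar and a hadoop-common jar built against different Hadoop lines cannot work together. A diagnostic sketch, assuming the paths from the question and a JDK plus unzip on the host (the /tmp path is arbitrary):
# is a Phoenix trace sink registered with the metrics2 system?
grep -i phoenix /opt/hbase/conf/hadoop-metrics2-hbase.properties
# which Hadoop line is actually on HBase's classpath?
ls /opt/hbase/lib/hadoop-common-*.jar
# which SubsetConfiguration package was the sink compiled against?
unzip -p /opt/hbase/lib/phoenix-5.0.0-HBase-2.0-server.jar \
    org/apache/phoenix/trace/PhoenixMetricsSink.class > /tmp/PhoenixMetricsSink.class
javap /tmp/PhoenixMetricsSink.class | grep -i SubsetConfiguration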
I am trying to use Lucene instead of Elasticsearch (the default) when configuring JanusGraph. To do this, I changed the following in janusgraph-cql-es.properties:
index.search.backend=lucene
index.search.directory=/data/searchindex
and also commented out
index.search.hostname=127.0.0.1
However, the command to start JanusGraph, bin/janusgraph.sh -v start, gives this error:
3718 [main] ERROR org.janusgraph.graphdb.server.JanusGraphServer - JanusGraph Server was unable to start and will now begin shutdown
java.lang.RuntimeException: java.lang.reflect.InvocationTargetException
at org.apache.tinkerpop.gremlin.server.util.ServerGremlinExecutor.<init>(ServerGremlinExecutor.java:97)
at org.apache.tinkerpop.gremlin.server.GremlinServer.<init>(GremlinServer.java:127)
at org.apache.tinkerpop.gremlin.server.GremlinServer.<init>(GremlinServer.java:90)
at org.janusgraph.graphdb.server.JanusGraphServer.start(JanusGraphServer.java:85)
at org.janusgraph.graphdb.server.JanusGraphServer.main(JanusGraphServer.java:53)
Caused by: java.lang.reflect.InvocationTargetException
at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
at org.apache.tinkerpop.gremlin.server.util.ServerGremlinExecutor.<init>(ServerGremlinExecutor.java:86)
... 4 more
Caused by: java.lang.IllegalArgumentException: Could not instantiate implementation: org.janusgraph.diskstorage.lucene.LuceneIndex
at org.janusgraph.util.system.ConfigurationUtil.instantiate(ConfigurationUtil.java:79)
at org.janusgraph.diskstorage.Backend.getImplementationClass(Backend.java:527)
at org.janusgraph.diskstorage.Backend.getIndexes(Backend.java:511)
at org.janusgraph.diskstorage.Backend.<init>(Backend.java:239)
at org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration.getBackend(GraphDatabaseConfiguration.java:1355)
at org.janusgraph.graphdb.database.StandardJanusGraph.<init>(StandardJanusGraph.java:184)
at org.janusgraph.core.JanusGraphFactory.lambda$open$0(JanusGraphFactory.java:165)
at org.janusgraph.graphdb.management.JanusGraphManager.openGraph(JanusGraphManager.java:239)
at org.janusgraph.core.JanusGraphFactory.open(JanusGraphFactory.java:165)
at org.janusgraph.core.JanusGraphFactory.open(JanusGraphFactory.java:115)
at org.janusgraph.graphdb.management.JanusGraphManager.lambda$new$0(JanusGraphManager.java:73)
at java.util.LinkedHashMap.forEach(LinkedHashMap.java:684)
at org.janusgraph.graphdb.management.JanusGraphManager.<init>(JanusGraphManager.java:72)
... 9 more
Caused by: java.lang.reflect.InvocationTargetException
at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.lang.reflect.Constructor.newInstance(Constructor.java:423)
at org.janusgraph.util.system.ConfigurationUtil.instantiate(ConfigurationUtil.java:73)
... 21 more
Caused by: java.lang.NullPointerException
at java.io.File.<init>(File.java:279)
at org.janusgraph.diskstorage.lucene.LuceneIndex.<init>(LuceneIndex.java:185)
... 26 more
What configuration changes need to be made here? Or can Lucene only be used with HBase as the storage backend, as shown here?
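For what it's worth, the NullPointerException is thrown from java.io.File's constructor, which indicates the Lucene directory resolved to null at runtime, i.e. the server never saw index.search.directory. bin/janusgraph.sh starts Gremlin Server from a YAML config that names its own graph properties file, which is not necessarily the file edited above. A diagnostic sketch (file locations are the distribution defaults and may differ in your install):
# which properties file does the server actually load?
grep -A 3 'graphs:' conf/gremlin-server/gremlin-server.yaml
# are the Lucene settings present in the file it loads?
grep 'index.search' conf/janusgraph-cql-es.properties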
I am trying to run Hadoop on Windows and getting this error:
Exception in thread "main" java.lang.UnsatisfiedLinkError: org.apache.hadoop.io.nativeio.NativeIO$Windows.createDirectoryWithMode0(Ljava/lang/String;I)V
at org.apache.hadoop.io.nativeio.NativeIO$Windows.createDirectoryWithMode0(Native Method)
at org.apache.hadoop.io.nativeio.NativeIO$Windows.createDirectoryWithMode(NativeIO.java:521)
at org.apache.hadoop.fs.RawLocalFileSystem.mkOneDirWithMode(RawLocalFileSystem.java:502)
at org.apache.hadoop.fs.RawLocalFileSystem.mkdirsWithOptionalPermission(RawLocalFileSystem.java:555)
at org.apache.hadoop.fs.RawLocalFileSystem.mkdirs(RawLocalFileSystem.java:533)
at org.apache.hadoop.fs.FilterFileSystem.mkdirs(FilterFileSystem.java:313)
at org.apache.hadoop.mapreduce.JobSubmissionFiles.getStagingDir(JobSubmissionFiles.java:133)
at org.apache.hadoop.mapreduce.JobSubmitter.submitJobInternal(JobSubmitter.java:146)
at org.apache.hadoop.mapreduce.Job$11.run(Job.java:1341)
at org.apache.hadoop.mapreduce.Job$11.run(Job.java:1338)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1807)
at org.apache.hadoop.mapreduce.Job.submit(Job.java:1338)
at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:1359)
at dat.MaxTemperatureDriver.run(MaxTemperatureDriver.java:30)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:76)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:90)
at dat.MaxTemperatureDriver.main(MaxTemperatureDriver.java:37)
I already have winutils.exe, hadoop.dll, vcredist_x64.dll, vc_redist.x64.exe, vcredist_arm.exe, and hdfs.dll in the Hadoop home's bin directory, and I have that directory in PATH and LD_LIBRARY_PATH. What am I still missing?
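Since the UnsatisfiedLinkError names one specific native method (createDirectoryWithMode0) rather than a failure to find hadoop.dll at all, the DLL often loads but was built for a different Hadoop version or CPU architecture. A couple of quick checks, sketched for a cygwin shell with HADOOP_HOME set:
# the JVM and hadoop.dll must agree on architecture (e.g. both 64-bit)
java -version
file "$HADOOP_HOME/bin/hadoop.dll"
# hadoop.dll should come from the same release as the Hadoop jars in use
ls "$HADOOP_HOME"/share/hadoop/common/hadoop-common-*.jar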
I am getting a ConnectionRefused error when running Pig in MapReduce mode.
Details:
I have installed Pig from the tarball (pig-0.14) and exported the classpath in .bashrc.
I have all the Hadoop (hadoop-2.5) daemons up and running (confirmed with jps).
[root@localhost sbin]# jps
2272 Jps
2130 DataNode
2022 NameNode
2073 SecondaryNameNode
2238 NodeManager
2190 ResourceManager
I am running Pig in MapReduce mode:
[root@localhost sbin]# pig
grunt> file = LOAD '/input/pig_input.csv' USING PigStorage(',') AS (col1,col2,col3);
grunt> dump file;
And then I am getting the error:
java.io.IOException: java.net.ConnectException: Call From localhost.localdomain/127.0.0.1 to 0.0.0.0:10020 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused
at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:334)
at org.apache.hadoop.mapred.ClientServiceDelegate.getJobStatus(ClientServiceDelegate.java:419)
at org.apache.hadoop.mapred.YARNRunner.getJobStatus(YARNRunner.java:532)
at org.apache.hadoop.mapreduce.Cluster.getJob(Cluster.java:183)
at org.apache.pig.backend.hadoop.executionengine.shims.HadoopShims.getTaskReports(HadoopShims.java:231)
at org.apache.pig.tools.pigstats.mapreduce.MRJobStats.addMapReduceStatistics(MRJobStats.java:352)
at org.apache.pig.tools.pigstats.mapreduce.MRPigStatsUtil.addSuccessJobStats(MRPigStatsUtil.java:233)
at org.apache.pig.tools.pigstats.mapreduce.MRPigStatsUtil.accumulateStats(MRPigStatsUtil.java:165)
at org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher.launchPig(MapReduceLauncher.java:360)
at org.apache.pig.backend.hadoop.executionengine.HExecutionEngine.launchPig(HExecutionEngine.java:280)
at org.apache.pig.PigServer.launchPlan(PigServer.java:1390)
at org.apache.pig.PigServer.executeCompiledLogicalPlan(PigServer.java:1375)
at org.apache.pig.PigServer.storeEx(PigServer.java:1034)
at org.apache.pig.PigServer.store(PigServer.java:997)
at org.apache.pig.PigServer.openIterator(PigServer.java:910)
at org.apache.pig.tools.grunt.GruntParser.processDump(GruntParser.java:746)
at org.apache.pig.tools.pigscript.parser.PigScriptParser.parse(PigScriptParser.java:372)
at org.apache.pig.tools.grunt.GruntParser.parseStopOnError(GruntParser.java:230)
at org.apache.pig.tools.grunt.GruntParser.parseStopOnError(GruntParser.java:205)
at org.apache.pig.tools.grunt.Grunt.run(Grunt.java:66)
at org.apache.pig.Main.run(Main.java:558)
at org.apache.pig.Main.main(Main.java:170)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:483)
at org.apache.hadoop.util.RunJar.main(RunJar.java:212)
Caused by: java.net.ConnectException: Call From localhost.localdomain/127.0.0.1 to 0.0.0.0:10020 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused
at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.lang.reflect.Constructor.newInstance(Constructor.java:408)
at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:783)
at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:730)
at org.apache.hadoop.ipc.Client.call(Client.java:1415)
at org.apache.hadoop.ipc.Client.call(Client.java:1364)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:206)
at com.sun.proxy.$Proxy15.getJobReport(Unknown Source)
at org.apache.hadoop.mapreduce.v2.api.impl.pb.client.MRClientProtocolPBClientImpl.getJobReport(MRClientProtocolPBClientImpl.java:133)
at sun.reflect.GeneratedMethodAccessor8.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:483)
at org.apache.hadoop.mapred.ClientServiceDelegate.invoke(ClientServiceDelegate.java:320)
... 26 more
Caused by: java.net.ConnectException: Connection refused
at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:712)
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:529)
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:493)
at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:606)
at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:700)
at org.apache.hadoop.ipc.Client$Connection.access$2800(Client.java:367)
at org.apache.hadoop.ipc.Client.getConnection(Client.java:1463)
at org.apache.hadoop.ipc.Client.call(Client.java:1382)
... 34 more
I haven't configured any config file in the Pig tarball. Do I need to make any changes in the Pig configuration to make it identify the Hadoop installation?
In .bashrc, HADOOP_HOME is set properly.
Please help.
To start the JobHistoryServer, go to Hadoop's sbin directory and then type the command:
mr-jobhistory-daemon.sh --config $HADOOP_CONF_DIR start historyserver
$HADOOP_CONF_DIR is the directory where Hadoop's config files like hdfs-site.xml reside.
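If the client still ends up at the default 0.0.0.0:10020 after that, the history server address can also be pinned explicitly in mapred-site.xml. A minimal sketch for a single-node setup (the hostname is an assumption; adjust it to your cluster):
<property>
  <name>mapreduce.jobhistory.address</name>
  <value>localhost:10020</value>
</property>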
Found the solution.
The line:
Caused by: java.net.ConnectException: Call From localhost.localdomain/127.0.0.1 to 0.0.0.0:10020 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused
appears when the JobHistoryServer is not up.
So starting the JobHistoryServer eliminates the issue.
To start it, go to Hadoop's sbin directory and run:
mr-jobhistory-daemon.sh start historyserver
Do a jps to check if JobHistoryServer is up, and then re-execute your Pig commands.
Updated command (as of 2017):
./mr-jobhistory-daemon.sh --config $HADOOP_CONF_DIR start historyserver
Then check:
jps
I have just started to work with Hadoop 2.
After installing with basic configs, I always fail to run any examples. Has anyone seen this problem? Please help me.
The error is something like:
Error starting MRAppMaster
java.lang.RuntimeException: java.lang.reflect.InvocationTargetException
This is the log:
2015-01-06 11:56:23,194 INFO [main] org.apache.hadoop.mapreduce.v2.app.MRAppMaster: Created MRAppMaster for application appattempt_1420510526926_0002_000001
2015-01-06 11:56:23,362 FATAL [main] org.apache.hadoop.mapreduce.v2.app.MRAppMaster: Error starting MRAppMaster
java.lang.RuntimeException: java.lang.reflect.InvocationTargetException
at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:131)
at org.apache.hadoop.security.Groups.<init>(Groups.java:70)
at org.apache.hadoop.security.Groups.<init>(Groups.java:66)
at org.apache.hadoop.security.Groups.getUserToGroupsMappingService(Groups.java:280)
at org.apache.hadoop.security.UserGroupInformation.initialize(UserGroupInformation.java:271)
at org.apache.hadoop.security.UserGroupInformation.setConfiguration(UserGroupInformation.java:299)
at org.apache.hadoop.mapreduce.v2.app.MRAppMaster.initAndStartAppMaster(MRAppMaster.java:1473)
at org.apache.hadoop.mapreduce.v2.app.MRAppMaster.main(MRAppMaster.java:1429)
Caused by: java.lang.reflect.InvocationTargetException
at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.lang.reflect.Constructor.newInstance(Constructor.java:408)
at org.apache.hadoop.util.ReflectionUtils.newInstance(ReflectionUtils.java:129)
... 7 more
Caused by: java.lang.UnsatisfiedLinkError: org.apache.hadoop.security.JniBasedUnixGroupsMapping.anchorNative()V
at org.apache.hadoop.security.JniBasedUnixGroupsMapping.anchorNative(Native Method)
at org.apache.hadoop.security.JniBasedUnixGroupsMapping.<clinit>(JniBasedUnixGroupsMapping.java:49)
at org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback.<init>(JniBasedUnixGroupsMappingWithFallback.java:39)
... 12 more
2015-01-06 11:56:23,366 INFO [main] org.apache.hadoop.util.ExitUtil: Exiting with status 1
Error messages: "Error starting MRAppMaster", "InvocationTargetException", "UnsatisfiedLinkError"
Root cause: failure to execute the native function "anchorNative" in the class "org.apache.hadoop.security.JniBasedUnixGroupsMapping"
Description: the function "anchorNative" calls into the native library "libhadoop.so". The path of this library is specified by these environment variables:
export JAVA_LIBRARY_PATH
export HADOOP_COMMON_LIB_NATIVE_DIR
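For example, both can point at the native directory of the build (a sketch; the prefix assumes $HADOOP_HOME points at the hadoop-2.6.0 tree shown below):
export HADOOP_COMMON_LIB_NATIVE_DIR="$HADOOP_HOME/lib/native"
export JAVA_LIBRARY_PATH="$HADOOP_HOME/lib/native:$JAVA_LIBRARY_PATH"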
In the Hadoop source code, you can print the Java library path with:
System.err.println(System.getProperty("java.library.path"));
# result
/home/maidinh/hadoop2/build/hadoop-2.6.0-src/hadoop-dist/target/hadoop-2.6.0/lib/native:
/usr/java/packages/lib/amd64:
/usr/lib64:
/lib64:
/lib:
/usr/lib
Different versions of the library "libhadoop.so" can be found in these locations, which creates a conflict.
Solution: keep the correct native library path (in hadoop-2.6.0/lib/native) and delete every "libhadoop.so" in the other directories; the find sketch below can help locate them first.
Notes: delete all related libraries of Hadoop:
rm -r libhadoop*
rm -r libhdfs*
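A sketch for listing every copy before deleting anything (a scan from / is slow; narrow the starting directory if you already know the candidate locations):
# list every copy of the Hadoop native libraries on the machine
find / -name 'libhadoop*' -o -name 'libhdfs*' 2>/dev/null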
I am new to Hadoop. I am using Windows 7 and Cygwin to work on Hadoop 1.2.1. Mine is a single-node system. I realized the TaskTracker was not starting up whenever I ran the MapReduce script file, so I applied the Hadoop 7682 patch from https://github.com/congainc/patch-hadoop_7682-1.0.x-win to solve the problem.
I added the jar file to the lib folder and also modified the core-site.xml file. Now I am able to run the TaskTracker. However, I now get an error whenever I run any program that makes use of MapReduce, for example the Mahout clustering command:
$MAHOUT_HOME/bin/mahout org.apache.mahout.clustering.syntheticcontrol.kmeans.Job
I get the error below:
Exception in thread "main" java.lang.RuntimeException: java.lang.ClassNotFoundException
$ bin/mahout org.apache.mahout.clustering.syntheticcontrol.kmeans.Job
MAHOUT_LOCAL is not set; adding HADOOP_CONF_DIR to classpath.
hadoop binary is not in PATH,HADOOP_HOME/bin,HADOOP_PREFIX/bin, running locally
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/C:/Mahout/trunk/examples/target/mahout-examples-0.9-SNAPSHOT-job.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/C:/Mahout/trunk/examples/target/dependency/slf4j-jcl-1.7.5.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.JCLLoggerFactory]
Aug 27, 2013 5:53:44 PM org.slf4j.impl.JCLLoggerAdapter warn
WARNING: No org.apache.mahout.clustering.syntheticcontrol.kmeans.Job.props found on classpath, will use command-line arguments only
Aug 27, 2013 5:53:45 PM org.slf4j.impl.JCLLoggerAdapter info
INFO: Running with default arguments
Aug 27, 2013 5:53:53 PM org.slf4j.impl.JCLLoggerAdapter info
INFO: Preparing Input
Aug 27, 2013 5:53:55 PM org.apache.hadoop.mapred.JobClient copyAndConfigureFiles
WARNING: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
Aug 27, 2013 5:53:55 PM org.apache.hadoop.mapred.JobClient$2 run
INFO: Cleaning up the staging area hdfs://localhost:9000/tmp/hadoop-USER/mapred/staging/USER/.staging/job_201308271750_0001
Exception in thread "main" java.lang.RuntimeException: java.lang.ClassNotFoundException: com.conga.services.hadoop.patch.HADOOP_7682.WinLocalFileSystem
at org.apache.hadoop.conf.Configuration.getClass(Configuration.java:857)
at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:1440)
at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:67)
at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:1464)
at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:263)
at org.apache.hadoop.fs.FileSystem.getLocal(FileSystem.java:234)
at org.apache.hadoop.fs.FileSystem.copyFromLocalFile(FileSystem.java:1230)
at org.apache.hadoop.fs.FileSystem.copyFromLocalFile(FileSystem.java:1206)
at org.apache.hadoop.fs.FileSystem.copyFromLocalFile(FileSystem.java:1178)
at org.apache.hadoop.mapred.JobClient.copyAndConfigureFiles(JobClient.java:864)
at org.apache.hadoop.mapred.JobClient.copyAndConfigureFiles(JobClient.java:734)
at org.apache.hadoop.mapred.JobClient.access$400(JobClient.java:179)
at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:951)
at org.apache.hadoop.mapred.JobClient$2.run(JobClient.java:936)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1190)
at org.apache.hadoop.mapred.JobClient.submitJobInternal(JobClient.java:936)
at org.apache.hadoop.mapreduce.Job.submit(Job.java:550)
at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:580)
at org.apache.mahout.clustering.conversion.InputDriver.runJob(InputDriver.java:108)
at org.apache.mahout.clustering.syntheticcontrol.kmeans.Job.run(Job.java:130)
at org.apache.mahout.clustering.syntheticcontrol.kmeans.Job.main(Job.java:60)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:601)
at org.apache.hadoop.util.ProgramDriver$ProgramDescription.invoke(ProgramDriver.java:68)
at org.apache.hadoop.util.ProgramDriver.driver(ProgramDriver.java:139)
at org.apache.mahout.driver.MahoutDriver.main(MahoutDriver.java:195)
Caused by: java.lang.ClassNotFoundException: com.conga.services.hadoop.patch.HADOOP_7682.WinLocalFileSystem
at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
at java.security.AccessController.doPrivileged(Native Method)
at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
at java.lang.ClassLoader.loadClass(ClassLoader.java:423)
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
at java.lang.ClassLoader.loadClass(ClassLoader.java:356)
at java.lang.Class.forName0(Native Method)
at java.lang.Class.forName(Class.java:264)
at org.apache.hadoop.conf.Configuration.getClassByName(Configuration.java:810)
at org.apache.hadoop.conf.Configuration.getClass(Configuration.java:855)
... 29 more
Am I missing something here?
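Given that the output above shows Mahout falling back to local mode ("hadoop binary is not in PATH ... running locally") while core-site.xml names the patch class, one thing worth verifying is whether the patch jar is actually on the classpath of the JVM that runs the job. A sketch, assuming the jar file name from the GitHub project (adjust it to the file you actually downloaded and to wherever you copied it):
# confirm the patched class is really packaged in the jar you installed
unzip -l "$HADOOP_HOME/lib/patch-hadoop_7682-1.0.x-win.jar" | grep WinLocalFileSystem
# make the jar visible to client-side JVMs that read core-site.xml
export HADOOP_CLASSPATH="$HADOOP_HOME/lib/patch-hadoop_7682-1.0.x-win.jar:$HADOOP_CLASSPATH"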