I recently started working with Hadoop. Now I want to access HDFS from a remote host that does not have a full Hadoop installation and only depends on hadoop-client-2.0.4-alpha.jar. I get an exception when accessing HDFS remotely; please help.

When I try to access HDFS, I get the following exception:

java.io.IOException: Failed on local exception: com.google.protobuf.InvalidProtocolBufferException: Message missing required fields: callId, status; Host Details : local host is: "webserver/127.0.0.1"; destination host is: "222.333.111.77":8020; 
     at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:761) 
     at org.apache.hadoop.ipc.Client.call(Client.java:1239) 
     at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:202) 
     at $Proxy25.getFileInfo(Unknown Source) 
     at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) 
     at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39) 
     at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25) 
     at java.lang.reflect.Method.invoke(Method.java:597) 
     at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:164) 
     at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:83) 
     at $Proxy25.getFileInfo(Unknown Source) 
     at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getFileInfo(ClientNamenodeProtocolTranslatorPB.java:630) 
     at org.apache.hadoop.hdfs.DFSClient.getFileInfo(DFSClient.java:1559) 
     at org.apache.hadoop.hdfs.DistributedFileSystem.getFileStatus(DistributedFileSystem.java:811) 
     at org.apache.hadoop.fs.FileSystem.exists(FileSystem.java:1345) 
     at com.kongming.kmdata.service.ExportService.copyToLocalFileFromHdfs(ExportService.java:60) 
     at com.kongming.kmdata.service.KMReportManager.run(KMReportManager.java:105) 
     at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:441) 
     at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303) 
     at java.util.concurrent.FutureTask.run(FutureTask.java:138) 
     at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886) 
     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908) 
     at java.lang.Thread.run(Thread.java:662) 
Caused by: com.google.protobuf.InvalidProtocolBufferException: Message missing required fields: callId, status 
     at com.google.protobuf.UninitializedMessageException.asInvalidProtocolBufferException(UninitializedMessageException.java:81) 
     at org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos$RpcResponseHeaderProto$Builder.buildParsed(RpcPayloadHeaderProtos.java:1094) 
     at org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos$RpcResponseHeaderProto$Builder.access$1300(RpcPayloadHeaderProtos.java:1028) 
     at org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos$RpcResponseHeaderProto.parseDelimitedFrom(RpcPayloadHeaderProtos.java:986) 
     at org.apache.hadoop.ipc.Client$Connection.receiveResponse(Client.java:946) 
     at org.apache.hadoop.ipc.Client$Connection.run(Client.java:844) 

It looks like an RPC exception. How can I resolve it? Here is my code:

package com.xxx.xxx.service;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Logger;

import com.xxx.xxx.fileSystem.IFilePath;
import com.xxx.xxx.inject.GuiceDependency;

public class ExportService {

    private static Logger log = Logger.getLogger(ExportService.class);

    private static Configuration configuration = new Configuration();

    private static String dir = "./";

    private static String hadoopConf = "hadoop-conf/";

    static {
        // Load the cluster configuration files copied from the remote cluster.
        configuration.addResource(new Path(hadoopConf + "core-site.xml"));
        configuration.addResource(new Path(hadoopConf + "hdfs-site.xml"));
        configuration.addResource(new Path(hadoopConf + "mapred-site.xml"));
        configuration.addResource(new Path(hadoopConf + "yarn-site.xml"));
    }

    public static boolean copyToLocalFileFromHdfs(String reportID) {

        IFilePath filePath = GuiceDependency.getInstance(IFilePath.class);

        // Source: the reduce output on HDFS; destination: a local CSV file.
        String resultPath = filePath.getFinalResult(reportID) + "/part-r-00000";
        Path src = new Path(resultPath);

        String exportPath = dir + reportID + ".csv";
        Path dst = new Path(exportPath);

        System.out.println(configuration.get("fs.defaultFS"));
        System.out.println("zxz copyToLocalFileFromHdfs src: " + src + " , dst: " + dst);

        try {
            System.out.println("zxz get fileSystem start");
            FileSystem fs = FileSystem.get(configuration);
            System.out.println("zxz get fileSystem end " + fs.getHomeDirectory());

            // The exception above is thrown here, on the first RPC to the NameNode.
            System.out.println("zxz exists: " + fs.exists(src));

            // delSrc = false, useRawLocalFileSystem = true (no local .crc files)
            fs.copyToLocalFile(false, src, dst, true);
        } catch (Exception e) {
            log.error("copyFromHDFSFile error : ", e);
            return false;
        }

        System.out.println("zxz end copyToLocalFileFromHdfs for report: " + reportID);
        return true;
    }
}

And here is the core-site.xml:

<?xml version="1.0" encoding="UTF-8"?>

<!--Autogenerated by Cloudera CM on 2013-07-19T00:57:49.581Z-->
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://222.333.111.77:8020</value>
    </property>
    <property>
        <name>fs.trash.interval</name>
        <value>1</value>
    </property>
    <property>
        <name>io.file.buffer.size</name>
        <value>65536</value>
    </property>
    <property>
        <name>hadoop.security.authentication</name>
        <value>simple</value>
    </property>
    <property>
        <name>hadoop.rpc.protection</name>
        <value>authentication</value>
    </property>
    <property>
        <name>hadoop.security.auth_to_local</name>
        <value>DEFAULT</value>
    </property>
    <property>
        <name>hadoop.native.lib</name>
        <value>false</value>
        <description>Should native hadoop libraries, if present, be used.</description>
    </property>
</configuration>

Does anyone know what causes this? Any help is greatly appreciated.

I believe HDFS uses the Google protobuf library. Your client code seems to be using a wrong (incompatible) protobuf version. Try digging in that direction. I have seen a similar exception, but it was fixed by our administrators, not by me. –

Thanks for your help. I checked, but my client does not use any other protobuf; only Hadoop uses it, so maybe it is not an incompatible version. Can you give me more information? It looks like a really uncommon problem; I have not seen anyone else hit it. – zxz

Thanks, the problem is solved! The hadoop-client version I used on the remote host was 2.0.4-alpha, but the Hadoop version installed on the cluster at fs.defaultFS is CDH 4.3.0. Many thanks. – zxz
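
For reference, the usual fix is to build the client against the same distribution the cluster runs. Below is a minimal Maven sketch, assuming a CDH 4.3.0 cluster; the hadoop-client version string (assumed here to be 2.0.0-cdh4.3.0) and the Cloudera repository URL should be verified against your cluster before use.

<repositories>
    <repository>
        <id>cloudera</id>
        <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
    </repository>
</repositories>

<dependencies>
    <dependency>
        <!-- Assumed artifact version for a CDH 4.3.0 cluster; confirm the exact
             version your cluster runs before pinning it here. -->
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-client</artifactId>
        <version>2.0.0-cdh4.3.0</version>
    </dependency>
</dependencies>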

Answer

I believe HDFS uses the Google protobuf library. Your client code seems to be using a wrong (incompatible) protobuf version.
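
If you want to confirm which versions actually end up on the client classpath, a quick check like the sketch below can help. It only uses standard Hadoop and protobuf classes; the class name ClasspathCheck is illustrative, and the printed jar paths depend on your environment.

import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.util.VersionInfo;
import com.google.protobuf.Message;

public class ClasspathCheck {
    public static void main(String[] args) {
        // Which jar provides the Hadoop IPC client class?
        System.out.println("hadoop ipc from : "
                + Client.class.getProtectionDomain().getCodeSource().getLocation());
        // Which jar provides protobuf?
        System.out.println("protobuf from   : "
                + Message.class.getProtectionDomain().getCodeSource().getLocation());
        // Which Hadoop version was the client built from?
        System.out.println("client Hadoop version : " + VersionInfo.getVersion());
    }
}

Comparing this output with what `hadoop version` reports on the cluster makes a mismatch easy to spot.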

Thank you very much! The problem is solved. The cluster was a Cloudera installation, while the code used Apache Hadoop. Sorry for the late reply. – zxz

How did you determine the mismatch between the versions? – 64k