[ 
https://issues.apache.org/jira/browse/HDFS-9617?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15088904#comment-15088904
 ] 

zuotingbing commented on HDFS-9617:
-----------------------------------

Thank you for your reply. 

Here is my client code:
[main class]
public class HadoopLoader {
  public HadoopLoader() {
  }

  public static void main(String[] args) {
    HadoopLoader hadoopLoader = new HadoopLoader();

    // upload the data
    hadoopLoader.upload();
  }

  private void upload() {
    new UploadProcess().upload();
  }
}
====================================================================================================
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class UploadProcess {
  private ExecutorService executorService;
  private Map<String, Boolean> processingFileMap = new ConcurrentHashMap<String, Boolean>();

  public void upload() {
    // HadoopLoader.CONFIG_PROPERTIES is defined elsewhere in my project
    executorService = Executors.newFixedThreadPool(HadoopLoader.CONFIG_PROPERTIES.getHandleNum());

    for (int i = 0; i < 1000; i++) {
      processLoad("/home/ztb/testdata/43.bmp", "hdfs://10.43.156.157:9000/ztbtest");
    }
  }

  private void processLoad(String filePathName, String hdfsFilePathName) {
    LoadThread loadThread = new LoadThread(filePathName, hdfsFilePathName);
    executorService.execute(loadThread);
  }

}
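(By the way, processingFileMap above is declared but never used. A minimal sketch, not the code I actually ran, of how it could stop two threads from creating the same HDFS path at once; it assumes the field is declared as a ConcurrentMap so putIfAbsent is available on Java 7:)

  // assumes: private final ConcurrentMap<String, Boolean> processingFileMap
  //              = new ConcurrentHashMap<String, Boolean>();
  private void processLoad(final String filePathName, final String hdfsFilePathName) {
    // Skip this file if another thread is already uploading to the same target path.
    if (processingFileMap.putIfAbsent(hdfsFilePathName, Boolean.TRUE) != null) {
      return;
    }
    executorService.execute(new Runnable() {
      @Override
      public void run() {
        try {
          new LoadThread(filePathName, hdfsFilePathName).run();
        } finally {
          // Allow the next upload to this target path once this one finishes.
          processingFileMap.remove(hdfsFilePathName);
        }
      }
    });
  }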

===================================================================================================
import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;

import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class LoadThread implements Runnable {
  private static final org.apache.commons.logging.Log LOG = LogFactory.getLog(LoadThread.class);

  String filePathName;         // full local data file name (path + file name)
  String hdfsFilePathName;     // full HDFS target file name (path + file name)

  public LoadThread(String filePathName, String hdfsFilePathName) {
    this.filePathName = filePathName;
    this.hdfsFilePathName = hdfsFilePathName;
  }

  public void writeToHdfs(String filePathName, String hdfsFilePathName) throws IOException {
    LOG.info("Start to upload " + filePathName + " to " + hdfsFilePathName);
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(hdfsFilePathName), conf);
    InputStream in = null;
    OutputStream out = null;
    Path hdfsFilePath;
    try {
      in = new BufferedInputStream(new FileInputStream(filePathName));
      hdfsFilePath = new Path(hdfsFilePathName);
      out = fs.create(hdfsFilePath);
      IOUtils.copyBytes(in, out, conf);
    } finally {
      if (in != null) {
        in.close();
      }
      if (out != null) {
        out.close();
      }
    }
    LOG.info("Finish uploading " + filePathName + " to " + hdfsFilePathName);
  }

  @Override
  public void run() {
    try {
      writeToHdfs(filePathName, hdfsFilePathName);
    } catch (IOException e) {
      LOG.error(e.getMessage(), e);
    }
  }

}
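(A side note on the finally block above: if in.close() throws, out.close() is never reached. A sketch of an alternative I did not run, built on Hadoop's IOUtils.closeStream, which ignores close failures so every stream gets a close attempt:)

import java.io.Closeable;
import org.apache.hadoop.io.IOUtils;

public final class CloseUtil {
  private CloseUtil() {
  }

  // IOUtils.closeStream swallows the IOException from close(), so a failure
  // on an earlier stream cannot skip closing the later ones.
  public static void closeAll(Closeable... streams) {
    for (Closeable c : streams) {
      IOUtils.closeStream(c);
    }
  }
}

// usage in writeToHdfs:  } finally { CloseUtil.closeAll(in, out); }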




I get java_pid8820.hprof when I set -XX:+HeapDumpOnOutOfMemoryError.
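(Some context for the histogram below: FileSystem.get() caches instances per scheme/authority/user, so all my LoadThreads share one cached FileSystem and thus one DFSClient and one lease holder, and the queued DFSOutputStream$Packet objects from every thread live in that same client. A quick check, assuming the same cluster URI as above:)

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class FsCacheCheck {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Both calls hit the FileSystem cache and return the same object, hence
    // the single DFSClient_NONMAPREDUCE_... holder seen in the stack trace.
    FileSystem fs1 = FileSystem.get(URI.create("hdfs://10.43.156.157:9000/ztbtest"), conf);
    FileSystem fs2 = FileSystem.get(URI.create("hdfs://10.43.156.157:9000/ztbtest"), conf);
    System.out.println(fs1 == fs2); // true unless fs.hdfs.impl.disable.cache=true
  }
}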


> my Java client uses multiple threads to put the same file to the same HDFS 
> URI; after a "no lease" error, the client hits OutOfMemoryError
> -----------------------------------------------------------------------------------------------------------------------
>
>                 Key: HDFS-9617
>                 URL: https://issues.apache.org/jira/browse/HDFS-9617
>             Project: Hadoop HDFS
>          Issue Type: Bug
>            Reporter: zuotingbing
>
> org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException):
>  No lease on /Tmp2/43.bmp.tmp (inode 2913263): File does not exist. [Lease.  
> Holder: DFSClient_NONMAPREDUCE_2084151715_1, pendingcreates: 250]
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3358)
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSNamesystem.analyzeFileState(FSNamesystem.java:3160)
>       at 
> org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:3042)
>       at 
> org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:615)
>       at 
> org.apache.hadoop.hdfs.server.namenode.AuthorizationProviderProxyClientProtocol.addBlock(AuthorizationProviderProxyClientProtocol.java:188)
>       at 
> org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:476)
>       at 
> org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
>       at 
> org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:587)
>       at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1026)
>       at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2013)
>       at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2009)
>       at java.security.AccessController.doPrivileged(Native Method)
>       at javax.security.auth.Subject.doAs(Subject.java:415)
>       at 
> org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1653)
>       at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2007)
>       at org.apache.hadoop.ipc.Client.call(Client.java:1411)
>       at org.apache.hadoop.ipc.Client.call(Client.java:1364)
>       at 
> org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:206)
>       at com.sun.proxy.$Proxy14.addBlock(Unknown Source)
>       at 
> org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.addBlock(ClientNamenodeProtocolTranslatorPB.java:391)
>       at sun.reflect.GeneratedMethodAccessor66.invoke(Unknown Source)
>       at 
> sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>       at java.lang.reflect.Method.invoke(Method.java:606)
>       at 
> org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:187)
>       at 
> org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
>       at com.sun.proxy.$Proxy15.addBlock(Unknown Source)
>       at 
> org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.locateFollowingBlock(DFSOutputStream.java:1473)
>       at 
> org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.nextBlockOutputStream(DFSOutputStream.java:1290)
>       at 
> org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer.run(DFSOutputStream.java:536)
> my Java client (JVM -Xmx2G):
> jmap histogram, top 15:
> num     #instances         #bytes  class name
> ----------------------------------------------
>    1:         48072     2053976792  [B
>    2:         45852        5987568  <constMethodKlass>
>    3:         45852        5878944  <methodKlass>
>    4:          3363        4193112  <constantPoolKlass>
>    5:          3363        2548168  <instanceKlassKlass>
>    6:          2733        2299008  <constantPoolCacheKlass>
>    7:           533        2191696  [Ljava.nio.ByteBuffer;
>    8:         24733        2026600  [C
>    9:         31287        2002368  org.apache.hadoop.hdfs.DFSOutputStream$Packet
>   10:         31972         767328  java.util.LinkedList$Node
>   11:         22845         548280  java.lang.String
>   12:         20372         488928  java.util.concurrent.atomic.AtomicLong
>   13:          3700         452984  java.lang.Class
>   14:           981         439576  <methodDataKlass>
>   15:          5583         376344  [S



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)
