Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 ea85801ce -> 40398d357


HDFS-13425. Ozone: Clean-up of ozone related change from hadoop-common-project. Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40398d35
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40398d35
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40398d35

Branch: refs/heads/HDFS-7240
Commit: 40398d357b97ce26d0b347ad7d78df3188eab44a
Parents: ea85801
Author: Mukul Kumar Singh <msi...@apache.org>
Authored: Thu Apr 12 13:46:52 2018 +0530
Committer: Mukul Kumar Singh <msi...@apache.org>
Committed: Thu Apr 12 13:46:52 2018 +0530

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/FileUtil.java     |  67 +----------
 .../main/java/org/apache/hadoop/ipc/RPC.java    |   1 +
 .../main/java/org/apache/hadoop/util/Time.java  |   9 --
 .../hadoop/util/concurrent/HadoopExecutors.java |  10 --
 .../org/apache/hadoop/hdds/scm/TestArchive.java | 114 -------------------
 .../replication/ContainerSupervisor.java        |  11 +-
 6 files changed, 12 insertions(+), 200 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40398d35/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 0e349d3..8743be5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -38,7 +38,6 @@ import java.nio.file.FileSystems;
 import java.nio.file.Files;
 import java.util.ArrayList;
 import java.util.Enumeration;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ExecutionException;
@@ -48,19 +47,14 @@ import java.util.concurrent.Future;
 import java.util.jar.Attributes;
 import java.util.jar.JarOutputStream;
 import java.util.jar.Manifest;
-import java.util.zip.CRC32;
-import java.util.zip.CheckedOutputStream;
 import java.util.zip.GZIPInputStream;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipFile;
 import java.util.zip.ZipInputStream;
-import java.util.zip.ZipOutputStream;
 
-import com.google.common.base.Preconditions;
 import org.apache.commons.collections.map.CaseInsensitiveMap;
 import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
 import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
-import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -75,7 +69,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * A collection of file-processing util methods.
+ * A collection of file-processing util methods
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
@@ -613,65 +607,6 @@ public class FileUtil {
   }
 
   /**
-   * creates zip archieve of the source dir and writes a zip file.
-   *
-   * @param sourceDir - The directory to zip.
-   * @param archiveName - The destination file, the parent directory is assumed
-   * to exist.
-   * @return Checksum of the Archive.
-   * @throws IOException - Throws if zipFileName already exists or if the
-   *                     sourceDir does not exist.
-   */
-  public static Long zip(File sourceDir, File archiveName) throws IOException {
-    Preconditions.checkNotNull(sourceDir, "source directory cannot be null");
-    Preconditions.checkState(sourceDir.exists(), "source directory must " +
-        "exist");
-
-    Preconditions.checkNotNull(archiveName, "Destination file cannot be null");
-    Preconditions.checkNotNull(archiveName.getParent(), "Destination " +
-        "directory cannot be null");
-    Preconditions.checkState(new File(archiveName.getParent()).exists(),
-        "Destination directory must exist");
-    Preconditions.checkState(!archiveName.exists(), "Destination file " +
-        "already exists. Refusing to overwrite existing file.");
-
-    CheckedOutputStream checksum;
-    try (FileOutputStream outputStream =
-             new FileOutputStream(archiveName)) {
-      checksum = new CheckedOutputStream(outputStream, new CRC32());
-      byte[] data = new byte[BUFFER_SIZE];
-      try (ZipOutputStream out =
-               new ZipOutputStream(new BufferedOutputStream(checksum))) {
-
-        Iterator<File> fileIter = FileUtils.iterateFiles(sourceDir, null, true);
-        while (fileIter.hasNext()) {
-          File file = fileIter.next();
-          LOG.debug("Compressing file : " + file.getPath());
-          try (FileInputStream currentFile = new FileInputStream(file)) {
-            ZipEntry entry = new ZipEntry(file.getCanonicalPath());
-            out.putNextEntry(entry);
-            try (BufferedInputStream sourceFile
-                     = new BufferedInputStream(currentFile, BUFFER_SIZE)) {
-              int bytesRead;
-              while ((bytesRead = sourceFile.read(data, 0, BUFFER_SIZE)) !=
-                  -1) {
-                out.write(data, 0, bytesRead);
-              }
-            }
-          }
-        }
-        out.flush();
-      }
-    }
-    // Exit condition -- ZipFile must exist.
-    Preconditions.checkState(archiveName.exists(),
-        "Expected archive file missing: {}", archiveName.toPath());
-    long crc32 = checksum.getChecksum().getValue();
-    checksum.close();
-    return crc32;
-  }
-
-  /**
    * Given a stream input it will unzip the it in the unzip directory.
    * passed as the second parameter
    * @param inputStream The zip file as input

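The zip helper removed above existed only for Ozone's container archiving. Code that still needs an equivalent can build one from the JDK's java.util.zip classes alone; the sketch below is a hypothetical stand-in (the ZipUtil class and method names are illustrative, not part of any Hadoop API), and unlike the removed method it stores entries relative to the source directory instead of by canonical path.

    import java.io.BufferedOutputStream;
    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;
    import java.util.zip.CRC32;
    import java.util.zip.CheckedOutputStream;
    import java.util.zip.ZipEntry;
    import java.util.zip.ZipOutputStream;

    public final class ZipUtil {                    // hypothetical helper
      /** Zips sourceDir into archive; returns the CRC32 of the archive bytes. */
      public static long zip(File sourceDir, File archive) throws IOException {
        CheckedOutputStream checked = new CheckedOutputStream(
            new FileOutputStream(archive), new CRC32());
        try (ZipOutputStream out =
                 new ZipOutputStream(new BufferedOutputStream(checked));
             Stream<Path> walk = Files.walk(sourceDir.toPath())) {
          List<Path> files =
              walk.filter(Files::isRegularFile).collect(Collectors.toList());
          for (Path p : files) {
            // Store entries relative to the source root, not canonical paths.
            out.putNextEntry(
                new ZipEntry(sourceDir.toPath().relativize(p).toString()));
            Files.copy(p, out);
            out.closeEntry();
          }
        }
        // Closing the ZipOutputStream flushed the whole chain through the
        // checksum stream, so the CRC now covers every archive byte.
        return checked.getChecksum().getValue();
      }
    }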
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40398d35/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 5a5082a..9cfadc7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -810,6 +810,7 @@ public class RPC {
   
   /** An RPC Server. */
   public abstract static class Server extends org.apache.hadoop.ipc.Server {
+
     boolean verbose;
 
     private static final Pattern COMPLEX_SERVER_NAME_PATTERN =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40398d35/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
index e099c8f..db5a567 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.util;
 
-import java.text.ParseException;
 import java.text.SimpleDateFormat;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -83,12 +82,4 @@ public final class Time {
   public static String formatTime(long millis) {
     return DATE_FORMAT.get().format(millis);
   }
-
-  /**
-   * Convert time in human readable format to millisecond.
-   * @return time in milliseconds
-   */
-  public static long formatDate(String date) throws ParseException {
-    return DATE_FORMAT.get().parse(date).getTime();
-  }
 }

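Time#formatDate (the inverse of formatTime) is removed with no remaining callers. Anything that still needs the conversion can parse with SimpleDateFormat directly; a minimal sketch, assuming Time.DATE_FORMAT's "yyyy-MM-dd HH:mm:ss,SSS" pattern (the pattern is an assumption taken from formatTime's output, check Time.DATE_FORMAT before relying on it):

    import java.text.ParseException;
    import java.text.SimpleDateFormat;

    // Hypothetical replacement for the removed Time#formatDate.
    static long parseToMillis(String date) throws ParseException {
      // SimpleDateFormat is not thread-safe, so create one per call
      // (or per thread, as Time itself does via a ThreadLocal).
      return new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS")
          .parse(date).getTime();
    }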
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40398d35/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
index 35f6045..7a04c30 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
@@ -20,8 +20,6 @@
 
 package org.apache.hadoop.util.concurrent;
 
-import org.slf4j.Logger;
-
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.LinkedBlockingQueue;
@@ -43,14 +41,6 @@ public final class HadoopExecutors {
         threadFactory);
   }
 
-  public static ExecutorService newCachedThreadPool(ThreadFactory
-      threadFactory, int maxThreads) {
-    return new HadoopThreadPoolExecutor(0, maxThreads,
-        60L, TimeUnit.SECONDS,
-        new LinkedBlockingQueue<>(),
-        threadFactory);
-  }
-
   public static ExecutorService newFixedThreadPool(int nThreads,
       ThreadFactory threadFactory) {
     return new HadoopThreadPoolExecutor(nThreads, nThreads,

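The removed newCachedThreadPool(ThreadFactory, int) overload was only used by ContainerSupervisor, which inlines it further down in this commit. Any other caller can build the same pool directly on the JDK's ThreadPoolExecutor; a sketch of the equivalent construction, minus whatever instrumentation HadoopThreadPoolExecutor adds:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    // Same parameters as the removed overload, on the plain JDK executor.
    static ExecutorService newCachedThreadPool(ThreadFactory factory,
        int maxThreads) {
      return new ThreadPoolExecutor(0, maxThreads, 60L, TimeUnit.SECONDS,
          new LinkedBlockingQueue<>(), factory);
    }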
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40398d35/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/TestArchive.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/TestArchive.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/TestArchive.java
deleted file mode 100644
index f53f770..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/TestArchive.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.fs.FileUtil;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.Iterator;
-import java.util.Random;
-import java.util.zip.Adler32;
-import java.util.zip.Checksum;
-
-/**
- * Test archive creation and unpacking.
- */
-public class TestArchive {
-  private static final int DIR_COUNT = 10;
-  private static final int SUB_DIR_COUNT = 3;
-  private static final int FILE_COUNT = 10;
-  private long checksumWrite = 0L;
-  private long checksumRead = 0L;
-  private long tmp = 0L;
-  private Checksum crc = new Adler32();
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  @Rule
-  public TemporaryFolder outputFolder = new TemporaryFolder();
-
-
-  @Before
-  public void setUp() throws Exception {
-    Random r = new Random();
-    final int megaByte = 1024 * 1024;
-
-    for (int x = 0; x < DIR_COUNT; x++) {
-      File subdir = folder.newFolder(String.format("dir%d", x));
-      for (int y = 0; y < SUB_DIR_COUNT; y++) {
-        File targetDir = new File(subdir.getPath().concat(File.separator)
-            .concat(String.format("subdir%d%d", x, y)));
-        if(!targetDir.mkdirs()) {
-          throw new IOException("Failed to create subdirectory. " +
-              targetDir.toString());
-        }
-        for (int z = 0; z < FILE_COUNT; z++) {
-          Path temp = Paths.get(targetDir.getPath().concat(File.separator)
-              .concat(String.format("File%d.txt", z)));
-          byte[] buf = RandomUtils.nextBytes(r.nextInt(megaByte));
-          Files.write(temp, buf);
-          crc.reset();
-          crc.update(buf, 0, buf.length);
-          tmp = crc.getValue();
-          checksumWrite +=tmp;
-        }
-      }
-    }
-  }
-
-  @Test
-  public void testArchive() throws Exception {
-    File archiveFile = new File(outputFolder.getRoot() + File.separator
-        + "test.container.zip");
-    long zipCheckSum = FileUtil.zip(folder.getRoot(), archiveFile);
-    Assert.assertTrue(zipCheckSum > 0);
-    File decomp = new File(outputFolder.getRoot() + File.separator +
-        "decompress");
-    if (!decomp.exists() && !decomp.mkdirs()) {
-      throw new IOException("Unable to create the destination directory. " +
-          decomp.getPath());
-    }
-
-    FileUtil.unZip(archiveFile, decomp);
-    String[] patterns = {"txt"};
-    Iterator<File> iter = FileUtils.iterateFiles(decomp, patterns, true);
-    int count = 0;
-    while (iter.hasNext()) {
-      count++;
-      byte[] buf = Files.readAllBytes(iter.next().toPath());
-      crc.reset();
-      crc.update(buf, 0, buf.length);
-      tmp = crc.getValue();
-      checksumRead += tmp;
-    }
-    Assert.assertEquals(DIR_COUNT * SUB_DIR_COUNT * FILE_COUNT, count);
-    Assert.assertEquals(checksumWrite, checksumRead);
-  }
-}

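TestArchive is deleted wholesale along with FileUtil#zip. If the zip logic moves elsewhere, a round-trip check in the spirit of the old test might look like the sketch below (JUnit 4, matching the deleted test; ZipUtil is the hypothetical helper sketched earlier, while FileUtil#unZip is real and survives this commit):

    import static org.junit.Assert.assertArrayEquals;

    import java.io.File;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import org.apache.hadoop.fs.FileUtil;
    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.TemporaryFolder;

    public class TestZipRoundTrip {
      @Rule
      public TemporaryFolder tmp = new TemporaryFolder();

      @Test
      public void zipThenUnzipPreservesBytes() throws Exception {
        File src = tmp.newFolder("src");
        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
        Files.write(new File(src, "a.txt").toPath(), payload);

        File archive = new File(tmp.getRoot(), "test.zip");
        ZipUtil.zip(src, archive);       // hypothetical helper from above

        File dest = tmp.newFolder("dest");
        FileUtil.unZip(archive, dest);
        assertArrayEquals(payload,
            Files.readAllBytes(new File(dest, "a.txt").toPath()));
      }
    }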
http://git-wip-us.apache.org/repos/asf/hadoop/blob/40398d35/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
index 52321ee..c14303f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
+import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -39,6 +40,8 @@ import java.util.List;
 import java.util.PriorityQueue;
 import java.util.Set;
 import java.util.concurrent.ExecutorService;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -129,7 +132,7 @@ public class ContainerSupervisor implements Closeable {
     this.poolQueue = new PriorityQueue<>();
     this.runnable = new AtomicBoolean(true);
     this.threadFaultCount = new AtomicInteger(0);
-    this.executorService = HadoopExecutors.newCachedThreadPool(
+    this.executorService = newCachedThreadPool(
         new ThreadFactoryBuilder().setDaemon(true)
             .setNameFormat("Container Reports Processing Thread - %d")
             .build(), maxContainerReportThreads);
@@ -139,6 +142,12 @@ public class ContainerSupervisor implements Closeable {
     initPoolProcessThread();
   }
 
+  private ExecutorService newCachedThreadPool(ThreadFactory threadFactory,
+      int maxThreads) {
+    return new HadoopThreadPoolExecutor(0, maxThreads, 60L, TimeUnit.SECONDS,
+        new LinkedBlockingQueue<>(), threadFactory);
+  }
+
   /**
    * Returns the number of pools that are under process right now.
    * @return  int - Number of pools that are in process.

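One behavioral note on the inlined helper (and the removed HadoopExecutors overload it mirrors): with a corePoolSize of 0 and an unbounded LinkedBlockingQueue, ThreadPoolExecutor only spawns a worker when the queue rejects an offer, so such a pool effectively runs a single thread regardless of maxThreads. A pool that actually scales to maxThreads would use a SynchronousQueue, as Executors.newCachedThreadPool does; a sketch, only relevant if the single-worker behavior is not what was intended here:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.SynchronousQueue;
    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    // Scales up to maxThreads because SynchronousQueue has no capacity;
    // once maxThreads workers are busy, further submissions are rejected
    // rather than queued.
    static ExecutorService newScalingThreadPool(ThreadFactory factory,
        int maxThreads) {
      return new ThreadPoolExecutor(0, maxThreads, 60L, TimeUnit.SECONDS,
          new SynchronousQueue<>(), factory);
    }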
