[hadoop] branch branch-3.1.2 updated: Revert "YARN-8270 Adding JMX Metrics for Timeline Collector and Reader. Contributed by Sushil Ks."

2019-01-27 Thread rohithsharmaks
This is an automated email from the ASF dual-hosted git repository.

rohithsharmaks pushed a commit to branch branch-3.1.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1.2 by this push:
 new 1019dde  Revert "YARN-8270 Adding JMX Metrics for Timeline Collector and Reader. Contributed by Sushil Ks."
1019dde is described below

commit 1019dde65bcf12e05ef48ac71e84550d589e5d9a
Author: Rohith Sharma K S 
AuthorDate: Mon Jan 28 11:10:00 2019 +0530

Revert "YARN-8270 Adding JMX Metrics for Timeline Collector and Reader. 
Contributed by Sushil Ks."

This reverts commit 5b72aa04e104242d1761abf56822fb38e9915def.
---
 .../collector/TimelineCollectorWebService.java |  16 +-
 .../PerNodeAggTimelineCollectorMetrics.java| 117 ---
 .../metrics/TimelineReaderMetrics.java | 113 --
 .../timelineservice/metrics/package-info.java  |  28 
 .../reader/TimelineReaderWebServices.java  | 166 +++--
 .../TestPerNodeAggTimelineCollectorMetrics.java|  56 ---
 .../reader/TestTimelineReaderMetrics.java  |  56 ---
 7 files changed, 52 insertions(+), 500 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
index 150249b..4e51fca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
@@ -41,7 +41,6 @@ import 
org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.http.JettyUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import 
org.apache.hadoop.yarn.api.records.timelineservice.ApplicationAttemptEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
@@ -54,7 +53,6 @@ import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.api.records.timelineservice.UserEntity;
-import 
org.apache.hadoop.yarn.server.timelineservice.metrics.PerNodeAggTimelineCollectorMetrics;
 import org.apache.hadoop.yarn.webapp.ForbiddenException;
 import org.apache.hadoop.yarn.webapp.NotFoundException;
 
@@ -79,8 +77,6 @@ public class TimelineCollectorWebService {
   LoggerFactory.getLogger(TimelineCollectorWebService.class);
 
   private @Context ServletContext context;
-  private static final PerNodeAggTimelineCollectorMetrics METRICS =
-  PerNodeAggTimelineCollectorMetrics.getInstance();
 
   /**
* Gives information about timeline collector.
@@ -155,15 +151,12 @@ public class TimelineCollectorWebService {
   TimelineEntities entities) {
 init(res);
 UserGroupInformation callerUgi = getUser(req);
-boolean isAsync = async != null && async.trim().equalsIgnoreCase("true");
 if (callerUgi == null) {
   String msg = "The owner of the posted timeline entities is not set";
   LOG.error(msg);
   throw new ForbiddenException(msg);
 }
 
-long startTime = Time.monotonicNow();
-boolean succeeded = false;
 try {
   ApplicationId appID = parseApplicationId(appId);
   if (appID == null) {
@@ -178,6 +171,7 @@ public class TimelineCollectorWebService {
 throw new NotFoundException("Application: "+ appId + " is not found");
   }
 
+  boolean isAsync = async != null && async.trim().equalsIgnoreCase("true");
   if (isAsync) {
 collector.putEntitiesAsync(processTimelineEntities(entities, appId,
 Boolean.valueOf(isSubAppEntities)), callerUgi);
@@ -186,7 +180,6 @@ public class TimelineCollectorWebService {
 Boolean.valueOf(isSubAppEntities)), callerUgi);
   }
 
-  succeeded = true;
   return Response.ok().build();
 } catch (NotFoundException | ForbiddenException e) {
   throw new WebApplicationException(e,
@@ -195,13 +188,6 @@ public class TimelineCollectorWebService {
   LOG.error("Error putting entities", e);
   throw new WebApplicationException(e,
   Response.Status.INTERNAL_SERVER_ERROR);
-} finally {
-  long 
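
For context on what this revert removes: the deleted lines wrapped the entity-put path in a request-timing pattern: capture a monotonic start time, flip a succeeded flag once the put completes, and report latency from a finally block to a metrics singleton. Below is a minimal sketch of that pattern only; CollectorMetrics and addPutEntitiesLatency are placeholder names standing in for the removed PerNodeAggTimelineCollectorMetrics, not confirmed Hadoop APIs.

import org.apache.hadoop.util.Time;

/** Hedged sketch of the latency-reporting pattern removed by the revert. */
public final class PutEntitiesTimingSketch {

  /** Stand-in for the removed metrics singleton; the method name is assumed. */
  interface CollectorMetrics {
    void addPutEntitiesLatency(long latencyMs, boolean succeeded);
  }

  static void timedPut(CollectorMetrics metrics, Runnable doPut) {
    long startTime = Time.monotonicNow();  // monotonic clock, safe for elapsed time
    boolean succeeded = false;
    try {
      doPut.run();                         // the actual write to the collector
      succeeded = true;
    } finally {
      // Report latency and outcome whether the put succeeded or threw.
      metrics.addPutEntitiesLatency(Time.monotonicNow() - startTime, succeeded);
    }
  }
}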

[hadoop] branch branch-3.1 updated: Revert "YARN-8270 Adding JMX Metrics for Timeline Collector and Reader. Contributed by Sushil Ks."

2019-01-27 Thread rohithsharmaks
This is an automated email from the ASF dual-hosted git repository.

rohithsharmaks pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 6e059c7  Revert "YARN-8270 Adding JMX Metrics for Timeline Collector and Reader. Contributed by Sushil Ks."
6e059c7 is described below

commit 6e059c793069a1fc7d89ae2b3e680b634a3e0a81
Author: Rohith Sharma K S 
AuthorDate: Mon Jan 28 10:55:12 2019 +0530

Revert "YARN-8270 Adding JMX Metrics for Timeline Collector and Reader. 
Contributed by Sushil Ks."

This reverts commit 5b72aa04e104242d1761abf56822fb38e9915def.
---
 .../collector/TimelineCollectorWebService.java |  16 +-
 .../PerNodeAggTimelineCollectorMetrics.java| 117 ---
 .../metrics/TimelineReaderMetrics.java | 113 --
 .../timelineservice/metrics/package-info.java  |  28 
 .../reader/TimelineReaderWebServices.java  | 166 +++--
 .../TestPerNodeAggTimelineCollectorMetrics.java|  56 ---
 .../reader/TestTimelineReaderMetrics.java  |  56 ---
 7 files changed, 52 insertions(+), 500 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
index 150249b..4e51fca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorWebService.java
@@ -41,7 +41,6 @@ import 
org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.http.JettyUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import 
org.apache.hadoop.yarn.api.records.timelineservice.ApplicationAttemptEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
@@ -54,7 +53,6 @@ import 
org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.api.records.timelineservice.UserEntity;
-import 
org.apache.hadoop.yarn.server.timelineservice.metrics.PerNodeAggTimelineCollectorMetrics;
 import org.apache.hadoop.yarn.webapp.ForbiddenException;
 import org.apache.hadoop.yarn.webapp.NotFoundException;
 
@@ -79,8 +77,6 @@ public class TimelineCollectorWebService {
   LoggerFactory.getLogger(TimelineCollectorWebService.class);
 
   private @Context ServletContext context;
-  private static final PerNodeAggTimelineCollectorMetrics METRICS =
-  PerNodeAggTimelineCollectorMetrics.getInstance();
 
   /**
* Gives information about timeline collector.
@@ -155,15 +151,12 @@ public class TimelineCollectorWebService {
   TimelineEntities entities) {
 init(res);
 UserGroupInformation callerUgi = getUser(req);
-boolean isAsync = async != null && async.trim().equalsIgnoreCase("true");
 if (callerUgi == null) {
   String msg = "The owner of the posted timeline entities is not set";
   LOG.error(msg);
   throw new ForbiddenException(msg);
 }
 
-long startTime = Time.monotonicNow();
-boolean succeeded = false;
 try {
   ApplicationId appID = parseApplicationId(appId);
   if (appID == null) {
@@ -178,6 +171,7 @@ public class TimelineCollectorWebService {
 throw new NotFoundException("Application: "+ appId + " is not found");
   }
 
+  boolean isAsync = async != null && async.trim().equalsIgnoreCase("true");
   if (isAsync) {
 collector.putEntitiesAsync(processTimelineEntities(entities, appId,
 Boolean.valueOf(isSubAppEntities)), callerUgi);
@@ -186,7 +180,6 @@ public class TimelineCollectorWebService {
 Boolean.valueOf(isSubAppEntities)), callerUgi);
   }
 
-  succeeded = true;
   return Response.ok().build();
 } catch (NotFoundException | ForbiddenException e) {
   throw new WebApplicationException(e,
@@ -195,13 +188,6 @@ public class TimelineCollectorWebService {
   LOG.error("Error putting entities", e);
   throw new WebApplicationException(e,
   Response.Status.INTERNAL_SERVER_ERROR);
-} finally {
-  long 

[hadoop] branch HDFS-13891 updated: HDFS-14224. RBF: NPE in getContentSummary() for getEcPolicy() in case of multiple destinations. Contributed by Ayush Saxena.

2019-01-27 Thread brahma
This is an automated email from the ASF dual-hosted git repository.

brahma pushed a commit to branch HDFS-13891
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/HDFS-13891 by this push:
 new caceff1  HDFS-14224. RBF: NPE in getContentSummary() for getEcPolicy() in case of multiple destinations. Contributed by Ayush Saxena.
caceff1 is described below

commit caceff1d6033a10b8de55e304bf8b8334d69d120
Author: Brahma Reddy Battula 
AuthorDate: Mon Jan 28 09:03:32 2019 +0530

HDFS-14224. RBF: NPE in getContentSummary() for getEcPolicy() in case of multiple destinations. Contributed by Ayush Saxena.
---
 .../server/federation/router/RouterClientProtocol.java   |  7 +++
 .../federation/router/TestRouterRpcMultiDestination.java | 16 
 2 files changed, 23 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
index c724b17..2d52ecb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
@@ -1628,6 +1628,7 @@ public class RouterClientProtocol implements 
ClientProtocol {
 long quota = 0;
 long spaceConsumed = 0;
 long spaceQuota = 0;
+String ecPolicy = "";
 
 for (ContentSummary summary : summaries) {
   length += summary.getLength();
@@ -1636,6 +1637,11 @@ public class RouterClientProtocol implements 
ClientProtocol {
   quota += summary.getQuota();
   spaceConsumed += summary.getSpaceConsumed();
   spaceQuota += summary.getSpaceQuota();
+  // We return from the first response as we assume that the EC policy
+  // of each sub-cluster is same.
+  if (ecPolicy.isEmpty()) {
+ecPolicy = summary.getErasureCodingPolicy();
+  }
 }
 
 ContentSummary ret = new ContentSummary.Builder()
@@ -1645,6 +1651,7 @@ public class RouterClientProtocol implements 
ClientProtocol {
 .quota(quota)
 .spaceConsumed(spaceConsumed)
 .spaceQuota(spaceQuota)
+.erasureCodingPolicy(ecPolicy)
 .build();
 return ret;
   }
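
The hunk above merges the ContentSummary returned by each sub-cluster and, to avoid the NPE, takes the erasure-coding policy from the first reply instead of leaving it unset. A self-contained sketch of that aggregation follows; the fileCount/directoryCount handling is assumed from the surrounding method rather than shown in this hunk.

import java.util.List;
import org.apache.hadoop.fs.ContentSummary;

/** Hedged sketch of the multi-destination ContentSummary aggregation. */
final class ContentSummaryAggregationSketch {

  static ContentSummary aggregate(List<ContentSummary> summaries) {
    long length = 0, fileCount = 0, directoryCount = 0;
    long quota = 0, spaceConsumed = 0, spaceQuota = 0;
    String ecPolicy = "";

    for (ContentSummary summary : summaries) {
      length += summary.getLength();
      fileCount += summary.getFileCount();
      directoryCount += summary.getDirectoryCount();
      quota += summary.getQuota();
      spaceConsumed += summary.getSpaceConsumed();
      spaceQuota += summary.getSpaceQuota();
      // The fix: keep the EC policy from the first response, assuming every
      // sub-cluster reports the same policy, so the field is never left unset.
      if (ecPolicy.isEmpty()) {
        ecPolicy = summary.getErasureCodingPolicy();
      }
    }
    return new ContentSummary.Builder()
        .length(length)
        .fileCount(fileCount)
        .directoryCount(directoryCount)
        .quota(quota)
        .spaceConsumed(spaceConsumed)
        .spaceQuota(spaceQuota)
        .erasureCodingPolicy(ecPolicy)
        .build();
  }
}
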
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
index 3101748..3d941bb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
@@ -41,6 +41,7 @@ import java.util.TreeSet;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -230,6 +231,21 @@ public class TestRouterRpcMultiDestination extends 
TestRouterRpc {
   }
 
   @Test
+  public void testGetContentSummaryEc() throws Exception {
+DistributedFileSystem routerDFS =
+(DistributedFileSystem) getRouterFileSystem();
+Path dir = new Path("/");
+String expectedECPolicy = "RS-6-3-1024k";
+try {
+  routerDFS.setErasureCodingPolicy(dir, expectedECPolicy);
+  assertEquals(expectedECPolicy,
+  routerDFS.getContentSummary(dir).getErasureCodingPolicy());
+} finally {
+  routerDFS.unsetErasureCodingPolicy(dir);
+}
+  }
+
+  @Test
   public void testSubclusterDown() throws Exception {
 final int totalFiles = 6;
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-989. Check Hdds Volumes for errors. Contributed by Arpit Agarwal.

2019-01-27 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3b49d7a  HDDS-989. Check Hdds Volumes for errors. Contributed by Arpit Agarwal.
3b49d7a is described below

commit 3b49d7aeae8819ce7c2c4f4fec057dd9e75dedf1
Author: Arpit Agarwal 
AuthorDate: Sun Jan 27 11:18:30 2019 -0800

HDDS-989. Check Hdds Volumes for errors. Contributed by Arpit Agarwal.
---
 .../container/common/volume/AbstractFuture.java| 1291 
 .../ozone/container/common/volume/HddsVolume.java  |   24 +-
 .../container/common/volume/HddsVolumeChecker.java |  418 +++
 .../common/volume/ThrottledAsyncChecker.java   |  245 
 .../container/common/volume/TimeoutFuture.java |  161 +++
 .../ozone/container/common/volume/VolumeSet.java   |  116 +-
 .../ozone/container/ozoneimpl/OzoneContainer.java  |1 +
 .../common/volume/TestHddsVolumeChecker.java   |  212 
 .../common/volume/TestVolumeSetDiskChecks.java |  185 +++
 9 files changed, 2643 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
new file mode 100644
index 000..438692c
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
@@ -0,0 +1,1291 @@
+/*
+ * Copyright (C) 2007 The Guava Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+/**
+ * Some portions of this class have been modified to make it functional in this
+ * package.
+ */
+package org.apache.hadoop.ozone.container.common.volume;
+
+import com.google.common.annotations.Beta;
+import com.google.common.annotations.GwtCompatible;
+import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkNotNull;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import com.google.common.util.concurrent.Uninterruptibles;
+import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater
+.newUpdater;
+
+import javax.annotation.Nullable;
+import java.security.AccessController;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executor;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy;
+import java.util.concurrent.locks.LockSupport;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/**
+ * An abstract implementation of {@link ListenableFuture}, intended for
+ * advanced users only. More common ways to create a {@code ListenableFuture}
+ * include instantiating a {@link SettableFuture}, submitting a task to a
+ * {@link ListeningExecutorService}, and deriving a {@code Future} from an
+ * existing one, typically using methods like {@link Futures#transform
+ * (ListenableFuture, com.google.common.base.Function) Futures.transform}
+ * and its overloaded versions.
+ * 
+ * This class implements all methods in {@code ListenableFuture}.
+ * Subclasses should provide a way to set the result of the computation
+ * through the protected methods {@link #set(Object)},
+ * {@link #setFuture(ListenableFuture)} and {@link #setException(Throwable)}.
+ * Subclasses may also override {@link #interruptTask()}, which will be
+ * invoked automatically if a call to {@link #cancel(boolean) cancel(true)}
+ * succeeds in canceling the future. Subclasses should rarely override other
+ * methods.
+ */
+
+@GwtCompatible(emulated = true)
+public abstract class AbstractFuture<V> implements ListenableFuture<V> {
+  // NOTE: Whenever both tests are cheap and functional, it's faster to use &,
+  // | instead of &&, ||
+
+  private static final boolean GENERATE_CANCELLATION_CAUSES =
+  
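
The file above is Guava's AbstractFuture, lightly adapted for the Ozone volume package; its javadoc describes the usual pattern of a subclass publishing a result through the protected set()/setException() methods while consumers attach callbacks instead of blocking. A small sketch of that pattern against Guava's own classes follows; VolumeCheckFutureSketch and its methods are hypothetical names, not part of this patch.

import com.google.common.util.concurrent.AbstractFuture;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.MoreExecutors;

/** Hedged sketch of the AbstractFuture usage pattern the javadoc describes. */
final class VolumeCheckFutureSketch extends AbstractFuture<Boolean> {

  // Subclasses publish results through the protected set()/setException().
  void complete(boolean healthy) {
    set(healthy);
  }

  void fail(Throwable t) {
    setException(t);
  }

  public static void main(String[] args) {
    VolumeCheckFutureSketch future = new VolumeCheckFutureSketch();

    // Consumers register callbacks rather than blocking on get().
    Futures.addCallback(future, new FutureCallback<Boolean>() {
      @Override public void onSuccess(Boolean healthy) {
        System.out.println("volume healthy: " + healthy);
      }
      @Override public void onFailure(Throwable t) {
        System.err.println("volume check failed: " + t);
      }
    }, MoreExecutors.directExecutor());

    future.complete(true);  // e.g. the outcome of a hypothetical disk check
  }
}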

[hadoop] branch branch-3.2 updated: HADOOP-16075. Upgrade checkstyle version to 8.16.

2019-01-27 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 6368218  HADOOP-16075. Upgrade checkstyle version to 8.16.
6368218 is described below

commit 636821854c9f3cd50d168ccfd8cf642a080f3a26
Author: Dinesh Chitlangia 
AuthorDate: Sun Jan 27 17:00:35 2019 +

HADOOP-16075. Upgrade checkstyle version to 8.16.

Contributed by Dinesh Chitlangia.

(cherry picked from commit 47d6b9bb7f6c3efb9681f8228df356cc668fe0d3)
---
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pom.xml b/pom.xml
index f990f63..5b02ed5 100644
--- a/pom.xml
+++ b/pom.xml
@@ -111,7 +111,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 2.5.0
 1.0.0
 3.0.0
-8.8
+8.16
 1.4.3
 
 bash





[hadoop] branch trunk updated: HADOOP-16075. Upgrade checkstyle version to 8.16.

2019-01-27 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 47d6b9b  HADOOP-16075. Upgrade checkstyle version to 8.16.
47d6b9b is described below

commit 47d6b9bb7f6c3efb9681f8228df356cc668fe0d3
Author: Dinesh Chitlangia 
AuthorDate: Sun Jan 27 16:59:28 2019 +

HADOOP-16075. Upgrade checkstyle version to 8.16.

Contributed by Dinesh Chitlangia.
---
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pom.xml b/pom.xml
index 327fcdd..b927077 100644
--- a/pom.xml
+++ b/pom.xml
@@ -111,7 +111,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs
 2.5.0
 1.0.0
 3.0.0
-8.8
+8.16
 1.4.3
 
 bash





[hadoop] branch branch-2.9 updated: HADOOP-16049. DistCp result has data and checksum mismatch when blocks per chunk > 0.

2019-01-27 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-2.9
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.9 by this push:
 new f55a2d6  HADOOP-16049. DistCp result has data and checksum mismatch when blocks per chunk > 0.
f55a2d6 is described below

commit f55a2d6f742f43071ccb3c19e1444a076c706a2e
Author: Kai Xie 
AuthorDate: Sun Jan 27 16:58:12 2019 +

HADOOP-16049. DistCp result has data and checksum mismatch when blocks per chunk > 0.

Contributed by Kai Xie.

(cherry picked from commit 6d3e7a8570ce22f1adcce0b9cef6959c273d6ba7)
---
 .../tools/mapred/RetriableFileCopyCommand.java |  24 +++--
 .../hadoop/tools/util/ThrottledInputStream.java|  48 +-
 .../org/apache/hadoop/tools/TestDistCpSync.java|  49 --
 .../hadoop/tools/TestDistCpSyncReverseBase.java| 102 +
 .../apache/hadoop/tools/mapred/TestCopyMapper.java |  24 -
 5 files changed, 170 insertions(+), 77 deletions(-)

diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
index ddf2725..7d7ebd4 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
@@ -257,7 +257,8 @@ public class RetriableFileCopyCommand extends 
RetriableCommand {
 boolean finished = false;
 try {
   inStream = getInputStream(source, context.getConfiguration());
-  int bytesRead = readBytes(inStream, buf, sourceOffset);
+  seekIfRequired(inStream, sourceOffset);
+  int bytesRead = readBytes(inStream, buf);
   while (bytesRead >= 0) {
 if (chunkLength > 0 &&
 (totalBytesRead + bytesRead) >= chunkLength) {
@@ -273,7 +274,7 @@ public class RetriableFileCopyCommand extends 
RetriableCommand {
 if (finished) {
   break;
 }
-bytesRead = readBytes(inStream, buf, sourceOffset);
+bytesRead = readBytes(inStream, buf);
   }
   outStream.close();
   outStream = null;
@@ -296,13 +297,20 @@ public class RetriableFileCopyCommand extends 
RetriableCommand {
 context.setStatus(message.toString());
   }
 
-  private static int readBytes(ThrottledInputStream inStream, byte buf[],
-  long position) throws IOException {
+  private static int readBytes(ThrottledInputStream inStream, byte[] buf)
+  throws IOException {
+try {
+  return inStream.read(buf);
+} catch (IOException e) {
+  throw new CopyReadException(e);
+}
+  }
+
+  private static void seekIfRequired(ThrottledInputStream inStream,
+ long sourceOffset) throws IOException {
 try {
-  if (position == 0) {
-return inStream.read(buf);
-  } else {
-return inStream.read(position, buf, 0, buf.length);
+  if (sourceOffset != inStream.getPos()) {
+inStream.seek(sourceOffset);
   }
 } catch (IOException e) {
   throw new CopyReadException(e);
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
index a0fa0c8..19fcb0a 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.tools.util;
 
-import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.Seekable;
 
 import java.io.IOException;
 import java.io.InputStream;
@@ -33,7 +33,7 @@ import java.io.InputStream;
  * (Thus, while the read-rate might exceed the maximum for a given short 
interval,
  * the average tends towards the specified maximum, overall.)
  */
-public class ThrottledInputStream extends InputStream {
+public class ThrottledInputStream extends InputStream implements Seekable {
 
   private final InputStream rawStream;
   private final long maxBytesPerSec;
@@ -95,25 +95,6 @@ public class ThrottledInputStream extends InputStream {
 return readLen;
   }
 
-  /**
-   * Read bytes starting from the specified position. This requires rawStream 
is
-   * an instance of {@link PositionedReadable}.
-   */
-  public int read(long position, byte[] buffer, int offset, int length)
-  throws IOException {
-if (!(rawStream instanceof PositionedReadable)) {
-  throw new UnsupportedOperationException(
-  "positioned read is not supported by the internal stream");
-}
-throttle();
-int readLen = ((PositionedReadable) rawStream).read(position, 
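
The change above replaces per-buffer positioned reads with a single seek followed by sequential reads, which is why ThrottledInputStream now implements Seekable: seeking once keeps every subsequent read on the throttled, sequential path. A hedged sketch of that copy loop follows; the buffer size and the chunk-boundary trimming are assumptions loosely based on the surrounding RetriableFileCopyCommand code, not copied from it.

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.fs.Seekable;

/** Hedged sketch of the seek-then-read pattern the patch switches to. */
final class ChunkCopySketch {

  static <S extends InputStream & Seekable> long copyChunk(
      S in, long sourceOffset, long chunkLength, OutputStream out)
      throws IOException {
    // Seek only when the stream is not already at the requested offset,
    // then read sequentially instead of issuing positioned reads.
    if (sourceOffset != in.getPos()) {
      in.seek(sourceOffset);
    }
    byte[] buf = new byte[8192];
    long totalBytesRead = 0;
    int bytesRead = in.read(buf);
    while (bytesRead >= 0) {
      if (chunkLength > 0 && totalBytesRead + bytesRead >= chunkLength) {
        // Trim the final buffer so the copy stops at the chunk boundary.
        bytesRead = (int) (chunkLength - totalBytesRead);
        out.write(buf, 0, bytesRead);
        totalBytesRead += bytesRead;
        break;
      }
      out.write(buf, 0, bytesRead);
      totalBytesRead += bytesRead;
      bytesRead = in.read(buf);
    }
    return totalBytesRead;
  }
}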

[hadoop] branch branch-2 updated: HADOOP-16049. DistCp result has data and checksum mismatch when blocks per chunk > 0.

2019-01-27 Thread stevel
This is an automated email from the ASF dual-hosted git repository.

stevel pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 6d3e7a8  HADOOP-16049. DistCp result has data and checksum mismatch when blocks per chunk > 0.
6d3e7a8 is described below

commit 6d3e7a8570ce22f1adcce0b9cef6959c273d6ba7
Author: Kai Xie 
AuthorDate: Sun Jan 27 16:56:57 2019 +

HADOOP-16049. DistCp result has data and checksum mismatch when blocks per chunk > 0.

Contributed by Kai Xie.
---
 .../tools/mapred/RetriableFileCopyCommand.java |  24 +++--
 .../hadoop/tools/util/ThrottledInputStream.java|  48 +-
 .../org/apache/hadoop/tools/TestDistCpSync.java|  49 --
 .../hadoop/tools/TestDistCpSyncReverseBase.java| 102 +
 .../apache/hadoop/tools/mapred/TestCopyMapper.java |  24 -
 5 files changed, 170 insertions(+), 77 deletions(-)

diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
index ddf2725..7d7ebd4 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
@@ -257,7 +257,8 @@ public class RetriableFileCopyCommand extends 
RetriableCommand {
 boolean finished = false;
 try {
   inStream = getInputStream(source, context.getConfiguration());
-  int bytesRead = readBytes(inStream, buf, sourceOffset);
+  seekIfRequired(inStream, sourceOffset);
+  int bytesRead = readBytes(inStream, buf);
   while (bytesRead >= 0) {
 if (chunkLength > 0 &&
 (totalBytesRead + bytesRead) >= chunkLength) {
@@ -273,7 +274,7 @@ public class RetriableFileCopyCommand extends 
RetriableCommand {
 if (finished) {
   break;
 }
-bytesRead = readBytes(inStream, buf, sourceOffset);
+bytesRead = readBytes(inStream, buf);
   }
   outStream.close();
   outStream = null;
@@ -296,13 +297,20 @@ public class RetriableFileCopyCommand extends 
RetriableCommand {
 context.setStatus(message.toString());
   }
 
-  private static int readBytes(ThrottledInputStream inStream, byte buf[],
-  long position) throws IOException {
+  private static int readBytes(ThrottledInputStream inStream, byte[] buf)
+  throws IOException {
+try {
+  return inStream.read(buf);
+} catch (IOException e) {
+  throw new CopyReadException(e);
+}
+  }
+
+  private static void seekIfRequired(ThrottledInputStream inStream,
+ long sourceOffset) throws IOException {
 try {
-  if (position == 0) {
-return inStream.read(buf);
-  } else {
-return inStream.read(position, buf, 0, buf.length);
+  if (sourceOffset != inStream.getPos()) {
+inStream.seek(sourceOffset);
   }
 } catch (IOException e) {
   throw new CopyReadException(e);
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
index a0fa0c8..19fcb0a 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ThrottledInputStream.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.tools.util;
 
-import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.Seekable;
 
 import java.io.IOException;
 import java.io.InputStream;
@@ -33,7 +33,7 @@ import java.io.InputStream;
  * (Thus, while the read-rate might exceed the maximum for a given short 
interval,
  * the average tends towards the specified maximum, overall.)
  */
-public class ThrottledInputStream extends InputStream {
+public class ThrottledInputStream extends InputStream implements Seekable {
 
   private final InputStream rawStream;
   private final long maxBytesPerSec;
@@ -95,25 +95,6 @@ public class ThrottledInputStream extends InputStream {
 return readLen;
   }
 
-  /**
-   * Read bytes starting from the specified position. This requires rawStream 
is
-   * an instance of {@link PositionedReadable}.
-   */
-  public int read(long position, byte[] buffer, int offset, int length)
-  throws IOException {
-if (!(rawStream instanceof PositionedReadable)) {
-  throw new UnsupportedOperationException(
-  "positioned read is not supported by the internal stream");
-}
-throttle();
-int readLen = ((PositionedReadable) rawStream).read(position, buffer,
-offset, length);
-if (readLen != -1) {
-  bytesRead +=