[hadoop] branch trunk updated: HDDS-1861. Fix TableCacheImpl cleanup logic. (#1165)

2019-07-25 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3426777  HDDS-1861. Fix TableCacheImpl cleanup logic. (#1165)
3426777 is described below

commit 3426777140d6dfd7bda13c23eb030fea75201307
Author: Bharat Viswanadham 
AuthorDate: Thu Jul 25 22:30:06 2019 -0700

HDDS-1861. Fix TableCacheImpl cleanup logic. (#1165)
---
 .../main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java
index 7a9d55b..e6dabf1 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/cache/TableCacheImpl.java
@@ -21,8 +21,9 @@ package org.apache.hadoop.utils.db.cache;
 
 import java.util.Iterator;
 import java.util.Map;
-import java.util.TreeSet;
+import java.util.NavigableSet;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ThreadFactory;
@@ -47,7 +48,7 @@ public class TableCacheImpl<CACHEKEY extends CacheKey,
     CACHEVALUE extends CacheValue> implements TableCache<CACHEKEY, CACHEVALUE> {
 
   private final ConcurrentHashMap<CACHEKEY, CACHEVALUE> cache;
-  private final TreeSet<EpochEntry<CACHEKEY>> epochEntries;
+  private final NavigableSet<EpochEntry<CACHEKEY>> epochEntries;
   private ExecutorService executorService;
   private CacheCleanupPolicy cleanupPolicy;
 
@@ -55,7 +56,7 @@ public class TableCacheImpl<CACHEKEY extends CacheKey,
   public TableCacheImpl(CacheCleanupPolicy cleanupPolicy) {
     cache = new ConcurrentHashMap<>();
-    epochEntries = new TreeSet<>();
+    epochEntries = new ConcurrentSkipListSet<>();
 // Created a singleThreadExecutor, so one cleanup will be running at a
 // time.
 ThreadFactory build = new ThreadFactoryBuilder().setDaemon(true)
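
The substance of this fix: cleanup runs on a single-thread executor while other threads keep inserting epoch entries, and TreeSet is unsafe under that interleaving, whereas ConcurrentSkipListSet is a concurrent sorted set whose weakly consistent iterators tolerate updates mid-iteration. A minimal, self-contained sketch of the pattern — Entry and evictUpTo are illustrative names, not the actual Ozone types:

import java.util.Iterator;
import java.util.NavigableSet;
import java.util.concurrent.ConcurrentSkipListSet;

public class EpochCleanupSketch {

  // Sorted by epoch, like EpochEntry in TableCacheImpl.
  static final class Entry implements Comparable<Entry> {
    final long epoch;
    Entry(long epoch) { this.epoch = epoch; }
    @Override
    public int compareTo(Entry o) { return Long.compare(epoch, o.epoch); }
  }

  // Safe for concurrent add() and iterator-removal; a TreeSet here can
  // throw ConcurrentModificationException or corrupt its structure.
  private final NavigableSet<Entry> epochEntries = new ConcurrentSkipListSet<>();

  void add(long epoch) {
    epochEntries.add(new Entry(epoch));
  }

  // Mirrors the cleanup loop run by the single-thread executor: drop
  // every entry at or below the given epoch.
  void evictUpTo(long epoch) {
    Iterator<Entry> it = epochEntries.iterator(); // weakly consistent
    while (it.hasNext()) {
      Entry e = it.next();
      if (e.epoch > epoch) {
        break; // the set is sorted, so nothing older remains
      }
      it.remove();
    }
  }
}

One caveat of the sketch: entries with equal epochs compare equal and get deduplicated here; the real EpochEntry<CACHEKEY> carries the cache key as well.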


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-14303. Addendum: check block directory logic not correct when there is only meta file, print no meaning warn log. Contributed by qiang Liu.

2019-07-25 Thread ayushsaxena
This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ce99cc3  HDFS-14303. Addendum: check block directory logic not correct 
when there is only meta file, print no meaning warn log. Contributed by qiang 
Liu.
ce99cc3 is described below

commit ce99cc31e9c34504669c30b160eb55c7cacd9966
Author: Ayush Saxena 
AuthorDate: Fri Jul 26 08:22:22 2019 +0530

HDFS-14303. Addendum: check block directory logic not correct when there is 
only meta file, print no meaning warn log. Contributed by qiang Liu.
---
 .../org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index a6fb1ab..67589e3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -1117,8 +1117,9 @@ public class TestDirectoryScanner {
 
   @Test
   public void testDirectoryScannerInFederatedCluster() throws Exception {
+HdfsConfiguration conf = new HdfsConfiguration(CONF);
 // Create Federated cluster with two nameservices and one DN
-try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF)
+try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(2))
 .numDataNodes(1).build()) {
   cluster.waitActive();
@@ -1134,7 +1135,7 @@ public class TestDirectoryScanner {
   int bp2Files = 2;
   writeFile(fs2, bp2Files);
   // Call the Directory scanner
-  scanner = new DirectoryScanner(fds, CONF);
+  scanner = new DirectoryScanner(fds, conf);
   scanner.setRetainDiffs(true);
   scanner.reconcile();
   // Check blocks in corresponding BP
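
The essence of the addendum is the first added line: the test used to mutate the shared static CONF, leaking settings into tests that run afterwards, and wrapping it in a fresh HdfsConfiguration(CONF) gives the test a private copy. A small self-contained sketch of the idiom using Hadoop's Configuration copy constructor — demo.key is a made-up key:

import org.apache.hadoop.conf.Configuration;

public class ConfCopySketch {
  public static void main(String[] args) {
    Configuration shared = new Configuration();
    shared.set("demo.key", "original");

    // The copy constructor snapshots the source's properties...
    Configuration perTest = new Configuration(shared);
    perTest.set("demo.key", "test-local");

    // ...so mutating the copy never touches the shared instance.
    System.out.println(shared.get("demo.key"));  // prints: original
    System.out.println(perTest.get("demo.key")); // prints: test-local
  }
}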


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1855. TestStorageContainerManager#testScmProcessDatanodeHeartbeat is failing. (#1153)

2019-07-25 Thread sammichen
This is an automated email from the ASF dual-hosted git repository.

sammichen pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a2cc961  HDDS-1855. 
TestStorageContainerManager#testScmProcessDatanodeHeartbeat is failing. (#1153)
a2cc961 is described below

commit a2cc961086928168f8149273d9d5bcb66055b138
Author: Nanda kumar 
AuthorDate: Fri Jul 26 08:24:23 2019 +0530

HDDS-1855. TestStorageContainerManager#testScmProcessDatanodeHeartbeat is 
failing. (#1153)
---
 .../hadoop/ozone/TestStorageContainerManager.java  | 22 +++---
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 3ac5993..8b0af2a 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.net.DNSToSwitchMapping;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -500,7 +501,9 @@ public class TestStorageContainerManager {
 String scmId = UUID.randomUUID().toString();
 conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
 StaticMapping.class, DNSToSwitchMapping.class);
-StaticMapping.addNodeToRack(HddsUtils.getHostName(conf), "/rack1");
+StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
+Collections.singleton(HddsUtils.getHostName(conf))).get(0),
+"/rack1");
 
 final int datanodeNum = 3;
 MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
@@ -520,21 +523,18 @@ public class TestStorageContainerManager {
   Thread.sleep(heartbeatCheckerIntervalMs * 2);
 
  List<DatanodeDetails> allNodes = scm.getScmNodeManager().getAllNodes();
-  Assert.assertTrue(allNodes.size() == datanodeNum);
-  for (int i = 0; i < allNodes.size(); i++) {
+  Assert.assertEquals(datanodeNum, allNodes.size());
+  for (DatanodeDetails node : allNodes) {
 DatanodeInfo datanodeInfo = (DatanodeInfo) scm.getScmNodeManager()
-.getNodeByUuid(allNodes.get(i).getUuidString());
+.getNodeByUuid(node.getUuidString());
 Assert.assertTrue((datanodeInfo.getLastHeartbeatTime() - start)
 >= heartbeatCheckerIntervalMs);
-Assert.assertTrue(datanodeInfo.getUuidString()
-.equals(datanodeInfo.getNetworkName()));
-Assert.assertTrue(datanodeInfo.getNetworkLocation()
-.equals("/rack1"));
+Assert.assertEquals(datanodeInfo.getUuidString(),
+datanodeInfo.getNetworkName());
+Assert.assertEquals("/rack1", datanodeInfo.getNetworkLocation());
   }
 } finally {
-  if (cluster != null) {
-cluster.shutdown();
-  }
+  cluster.shutdown();
 }
   }
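
Besides the NetUtils.normalizeHostNames change, the patch systematically rewrites assertTrue(a.equals(b)) as assertEquals(expected, actual). The win is diagnostic: on mismatch, assertEquals prints both values, while assertTrue prints nothing beyond the bare AssertionError. A tiny sketch — computeLocation is a stand-in for the SCM lookup:

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class AssertionStyleSketch {

  @Test
  public void preferAssertEquals() {
    String actual = computeLocation();
    // On mismatch this fails with: expected:</rack1> but was:<...>
    assertEquals("/rack1", actual);
    // The old style would fail with only "java.lang.AssertionError":
    // assertTrue("/rack1".equals(actual));
  }

  private String computeLocation() {
    return "/rack1"; // pretend this came from datanodeInfo.getNetworkLocation()
  }
}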
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-2 updated: HDFS-14135. TestWebHdfsTimeouts Fails intermittently in trunk. Contributed by Ayush Saxena.

2019-07-25 Thread iwasakims
This is an automated email from the ASF dual-hosted git repository.

iwasakims pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new 3bfae20  HDFS-14135. TestWebHdfsTimeouts Fails intermittently in 
trunk. Contributed by Ayush Saxena.
3bfae20 is described below

commit 3bfae20e946e40f6936aab54b79fc85df8d3b0b2
Author: Masatake Iwasaki 
AuthorDate: Fri Jun 21 14:33:41 2019 +0900

HDFS-14135. TestWebHdfsTimeouts Fails intermittently in trunk. Contributed 
by Ayush Saxena.

Signed-off-by: Masatake Iwasaki 
(cherry picked from commit 6b8107ad97251267253fa045ba03c4749f95f530)
---
 .../hadoop/hdfs/web/TestWebHdfsTimeouts.java   | 28 ++
 1 file changed, 28 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
index 67c39e1..fe4b91c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
@@ -35,6 +35,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -47,6 +48,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
+import org.junit.AssumptionViolatedException;
 import org.junit.Before;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -84,6 +86,7 @@ public class TestWebHdfsTimeouts {
   return conn;
 }
   });
+  private volatile boolean failedToConsumeBacklog;
 
   public enum TimeoutSource { ConnectionFactory, Configuration };
 
@@ -122,6 +125,7 @@ public class TestWebHdfsTimeouts {
 
 clients = new ArrayList<SocketChannel>();
 serverThread = null;
+failedToConsumeBacklog = false;
   }
 
   @After
@@ -211,6 +215,7 @@ public class TestWebHdfsTimeouts {
   fs.getFileChecksum(new Path("/file"));
   fail("expected timeout");
 } catch (SocketTimeoutException e) {
+  assumeBacklogConsumed();
   GenericTestUtils.assertExceptionContains(
   fs.getUri().getAuthority() + ": connect timed out", e);
 }
@@ -244,6 +249,7 @@ public class TestWebHdfsTimeouts {
   os = fs.create(new Path("/file"));
   fail("expected timeout");
 } catch (SocketTimeoutException e) {
+  assumeBacklogConsumed();
   GenericTestUtils.assertExceptionContains(
   fs.getUri().getAuthority() + ": connect timed out", e);
 } finally {
@@ -357,6 +363,28 @@ public class TestWebHdfsTimeouts {
   client.connect(nnHttpAddress);
   clients.add(client);
 }
+try {
+  GenericTestUtils.waitFor(() -> {
+try (SocketChannel c = SocketChannel.open()) {
+  c.socket().connect(nnHttpAddress, 100);
+} catch (SocketTimeoutException e) {
+  return true;
+} catch (IOException e) {
+  LOG.debug("unexpected exception: " + e);
+}
+return false;
+  }, 100, 1);
+} catch (TimeoutException | InterruptedException e) {
+  failedToConsumeBacklog = true;
+  assumeBacklogConsumed();
+}
+  }
+
+  private void assumeBacklogConsumed() {
+if (failedToConsumeBacklog) {
+  throw new AssumptionViolatedException(
+  "failed to fill up connection backlog.");
+}
   }
 
   /**
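
The heart of this fix is the probe added at the end of the last hunk: after opening enough client connections, the test keeps connecting with a short timeout until a connect attempt times out, which proves the server's accept backlog is genuinely full; if that never happens within the wait budget, the test is skipped via AssumptionViolatedException instead of failing spuriously. A standalone sketch of the probe itself — the address and the 100 ms timeout are illustrative:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketTimeoutException;
import java.nio.channels.SocketChannel;

public final class BacklogProbe {

  // Returns true only when a fresh connect() times out, i.e. the
  // server's accept backlog is actually exhausted.
  static boolean backlogConsumed(InetSocketAddress addr) {
    try (SocketChannel c = SocketChannel.open()) {
      c.socket().connect(addr, 100); // 100 ms connect timeout
      return false; // connected: the backlog still has room
    } catch (SocketTimeoutException e) {
      return true;  // timed out: the precondition for the test holds
    } catch (IOException e) {
      return false; // unexpected failure; treat as "not yet consumed"
    }
  }
}

In the committed test this check runs inside GenericTestUtils.waitFor, polling on an interval until a deadline before giving up and skipping.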


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: HDFS-14135. TestWebHdfsTimeouts Fails intermittently in trunk. Contributed by Ayush Saxena.

2019-07-25 Thread iwasakims
This is an automated email from the ASF dual-hosted git repository.

iwasakims pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new bf4a225  HDFS-14135. TestWebHdfsTimeouts Fails intermittently in 
trunk. Contributed by Ayush Saxena.
bf4a225 is described below

commit bf4a2258ce280120778b3f04844330df81c967b0
Author: Masatake Iwasaki 
AuthorDate: Fri Jun 21 14:33:41 2019 +0900

HDFS-14135. TestWebHdfsTimeouts Fails intermittently in trunk. Contributed 
by Ayush Saxena.

Signed-off-by: Masatake Iwasaki 
(cherry picked from commit 6b8107ad97251267253fa045ba03c4749f95f530)
---
 .../hadoop/hdfs/web/TestWebHdfsTimeouts.java   | 28 ++
 1 file changed, 28 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
index 4743821..ede7f35 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
@@ -35,6 +35,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -47,6 +48,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
+import org.junit.AssumptionViolatedException;
 import org.junit.Before;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -84,6 +86,7 @@ public class TestWebHdfsTimeouts {
   return conn;
 }
   });
+  private volatile boolean failedToConsumeBacklog;
 
   public enum TimeoutSource { ConnectionFactory, Configuration };
 
@@ -122,6 +125,7 @@ public class TestWebHdfsTimeouts {
 
 clients = new ArrayList<SocketChannel>();
 serverThread = null;
+failedToConsumeBacklog = false;
   }
 
   @After
@@ -211,6 +215,7 @@ public class TestWebHdfsTimeouts {
   fs.getFileChecksum(new Path("/file"));
   fail("expected timeout");
 } catch (SocketTimeoutException e) {
+  assumeBacklogConsumed();
   GenericTestUtils.assertExceptionContains(
   fs.getUri().getAuthority() + ": connect timed out", e);
 }
@@ -244,6 +249,7 @@ public class TestWebHdfsTimeouts {
   os = fs.create(new Path("/file"));
   fail("expected timeout");
 } catch (SocketTimeoutException e) {
+  assumeBacklogConsumed();
   GenericTestUtils.assertExceptionContains(
   fs.getUri().getAuthority() + ": connect timed out", e);
 } finally {
@@ -357,6 +363,28 @@ public class TestWebHdfsTimeouts {
   client.connect(nnHttpAddress);
   clients.add(client);
 }
+try {
+  GenericTestUtils.waitFor(() -> {
+try (SocketChannel c = SocketChannel.open()) {
+  c.socket().connect(nnHttpAddress, 100);
+} catch (SocketTimeoutException e) {
+  return true;
+} catch (IOException e) {
+  LOG.debug("unexpected exception: " + e);
+}
+return false;
+  }, 100, 1);
+} catch (TimeoutException | InterruptedException e) {
+  failedToConsumeBacklog = true;
+  assumeBacklogConsumed();
+}
+  }
+
+  private void assumeBacklogConsumed() {
+if (failedToConsumeBacklog) {
+  throw new AssumptionViolatedException(
+  "failed to fill up connection backlog.");
+}
   }
 
   /**


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HDFS-14135. TestWebHdfsTimeouts Fails intermittently in trunk. Contributed by Ayush Saxena.

2019-07-25 Thread iwasakims
This is an automated email from the ASF dual-hosted git repository.

iwasakims pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 2478d33  HDFS-14135. TestWebHdfsTimeouts Fails intermittently in 
trunk. Contributed by Ayush Saxena.
2478d33 is described below

commit 2478d333a6dc3123f4e84d4bbb6991f1cbda1ea6
Author: Masatake Iwasaki 
AuthorDate: Fri Jun 21 14:33:41 2019 +0900

HDFS-14135. TestWebHdfsTimeouts Fails intermittently in trunk. Contributed 
by Ayush Saxena.

Signed-off-by: Masatake Iwasaki 
(cherry picked from commit 6b8107ad97251267253fa045ba03c4749f95f530)
---
 .../hadoop/hdfs/web/TestWebHdfsTimeouts.java   | 28 ++
 1 file changed, 28 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
index 7b44515..a693ac3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
@@ -35,6 +35,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
+import java.util.concurrent.TimeoutException;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -47,6 +48,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
+import org.junit.AssumptionViolatedException;
 import org.junit.Before;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -85,6 +87,7 @@ public class TestWebHdfsTimeouts {
   return conn;
 }
   });
+  private volatile boolean failedToConsumeBacklog;
 
   public enum TimeoutSource { ConnectionFactory, Configuration };
 
@@ -123,6 +126,7 @@ public class TestWebHdfsTimeouts {
 
 clients = new ArrayList<SocketChannel>();
 serverThread = null;
+failedToConsumeBacklog = false;
   }
 
   @After
@@ -213,6 +217,7 @@ public class TestWebHdfsTimeouts {
   fs.getFileChecksum(new Path("/file"));
   fail("expected timeout");
 } catch (SocketTimeoutException e) {
+  assumeBacklogConsumed();
   GenericTestUtils.assertExceptionContains(
   fs.getUri().getAuthority() + ": connect timed out", e);
 }
@@ -246,6 +251,7 @@ public class TestWebHdfsTimeouts {
   os = fs.create(new Path("/file"));
   fail("expected timeout");
 } catch (SocketTimeoutException e) {
+  assumeBacklogConsumed();
   GenericTestUtils.assertExceptionContains(
   fs.getUri().getAuthority() + ": connect timed out", e);
 } finally {
@@ -359,6 +365,28 @@ public class TestWebHdfsTimeouts {
   client.connect(nnHttpAddress);
   clients.add(client);
 }
+try {
+  GenericTestUtils.waitFor(() -> {
+try (SocketChannel c = SocketChannel.open()) {
+  c.socket().connect(nnHttpAddress, 100);
+} catch (SocketTimeoutException e) {
+  return true;
+} catch (IOException e) {
+  LOG.debug("unexpected exception: " + e);
+}
+return false;
+  }, 100, 1);
+} catch (TimeoutException | InterruptedException e) {
+  failedToConsumeBacklog = true;
+  assumeBacklogConsumed();
+}
+  }
+
+  private void assumeBacklogConsumed() {
+if (failedToConsumeBacklog) {
+  throw new AssumptionViolatedException(
+  "failed to fill up connection backlog.");
+}
   }
 
   /**


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-14135. TestWebHdfsTimeouts Fails intermittently in trunk. Contributed by Ayush Saxena.

2019-07-25 Thread iwasakims
This is an automated email from the ASF dual-hosted git repository.

iwasakims pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 6b8107a  HDFS-14135. TestWebHdfsTimeouts Fails intermittently in 
trunk. Contributed by Ayush Saxena.
6b8107a is described below

commit 6b8107ad97251267253fa045ba03c4749f95f530
Author: Masatake Iwasaki 
AuthorDate: Fri Jun 21 14:33:41 2019 +0900

HDFS-14135. TestWebHdfsTimeouts Fails intermittently in trunk. Contributed 
by Ayush Saxena.

Signed-off-by: Masatake Iwasaki 
---
 .../hadoop/hdfs/web/TestWebHdfsTimeouts.java   | 28 ++
 1 file changed, 28 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
index 7b44515..a693ac3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
@@ -35,6 +35,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
+import java.util.concurrent.TimeoutException;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -47,6 +48,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
+import org.junit.AssumptionViolatedException;
 import org.junit.Before;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -85,6 +87,7 @@ public class TestWebHdfsTimeouts {
   return conn;
 }
   });
+  private volatile boolean failedToConsumeBacklog;
 
   public enum TimeoutSource { ConnectionFactory, Configuration };
 
@@ -123,6 +126,7 @@ public class TestWebHdfsTimeouts {
 
 clients = new ArrayList<SocketChannel>();
 serverThread = null;
+failedToConsumeBacklog = false;
   }
 
   @After
@@ -213,6 +217,7 @@ public class TestWebHdfsTimeouts {
   fs.getFileChecksum(new Path("/file"));
   fail("expected timeout");
 } catch (SocketTimeoutException e) {
+  assumeBacklogConsumed();
   GenericTestUtils.assertExceptionContains(
   fs.getUri().getAuthority() + ": connect timed out", e);
 }
@@ -246,6 +251,7 @@ public class TestWebHdfsTimeouts {
   os = fs.create(new Path("/file"));
   fail("expected timeout");
 } catch (SocketTimeoutException e) {
+  assumeBacklogConsumed();
   GenericTestUtils.assertExceptionContains(
   fs.getUri().getAuthority() + ": connect timed out", e);
 } finally {
@@ -359,6 +365,28 @@ public class TestWebHdfsTimeouts {
   client.connect(nnHttpAddress);
   clients.add(client);
 }
+try {
+  GenericTestUtils.waitFor(() -> {
+try (SocketChannel c = SocketChannel.open()) {
+  c.socket().connect(nnHttpAddress, 100);
+} catch (SocketTimeoutException e) {
+  return true;
+} catch (IOException e) {
+  LOG.debug("unexpected exception: " + e);
+}
+return false;
+  }, 100, 1);
+} catch (TimeoutException | InterruptedException e) {
+  failedToConsumeBacklog = true;
+  assumeBacklogConsumed();
+}
+  }
+
+  private void assumeBacklogConsumed() {
+if (failedToConsumeBacklog) {
+  throw new AssumptionViolatedException(
+  "failed to fill up connection backlog.");
+}
   }
 
   /**


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 01/01: Revert "HDDS-1830 OzoneManagerDoubleBuffer#stop should wait for daemon thread to die (#1156)"

2019-07-25 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch revert-1156-HDDS-1830
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 74937b8286753671deb4e9e4e16b24d182dde28b
Author: Arpit Agarwal 
AuthorDate: Thu Jul 25 16:18:04 2019 -0700

Revert "HDDS-1830 OzoneManagerDoubleBuffer#stop should wait for daemon 
thread to die (#1156)"

This reverts commit b7fba78fb63a0971835db87292822fd8cd4aa7ad.
---
 .../ozone/om/ratis/OzoneManagerDoubleBuffer.java | 20 +++-
 1 file changed, 7 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
index e329d5a..2bde3ad 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.om.ratis;
 import java.io.IOException;
 import java.util.Queue;
 import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -65,7 +64,7 @@ public class OzoneManagerDoubleBuffer {
   private final OMMetadataManager omMetadataManager;
   private final AtomicLong flushedTransactionCount = new AtomicLong(0);
   private final AtomicLong flushIterations = new AtomicLong(0);
-  private final AtomicBoolean isRunning = new AtomicBoolean(false);
+  private volatile boolean isRunning;
   private OzoneManagerDoubleBufferMetrics ozoneManagerDoubleBufferMetrics;
   private long maxFlushedTransactionsInOneIteration;
 
@@ -80,7 +79,7 @@ public class OzoneManagerDoubleBuffer {
 this.ozoneManagerDoubleBufferMetrics =
 OzoneManagerDoubleBufferMetrics.create();
 
-isRunning.set(true);
+isRunning = true;
 // Daemon thread which runs in back ground and flushes transactions to DB.
 daemon = new Daemon(this::flushTransactions);
 daemon.setName("OMDoubleBufferFlushThread");
@@ -93,7 +92,7 @@ public class OzoneManagerDoubleBuffer {
* and commit to DB.
*/
   private void flushTransactions() {
-while (isRunning.get()) {
+while(isRunning) {
   try {
 if (canFlush()) {
   setReadyBuffer();
@@ -141,7 +140,7 @@ public class OzoneManagerDoubleBuffer {
 }
   } catch (InterruptedException ex) {
 Thread.currentThread().interrupt();
-if (isRunning.get()) {
+if (isRunning) {
   final String message = "OMDoubleBuffer flush thread " +
   Thread.currentThread().getName() + " encountered Interrupted " +
   "exception while running";
@@ -202,16 +201,11 @@ public class OzoneManagerDoubleBuffer {
   /**
* Stop OM DoubleBuffer flush thread.
*/
-  public void stop() {
-if (isRunning.compareAndSet(true, false)) {
+  public synchronized void stop() {
+if (isRunning) {
   LOG.info("Stopping OMDoubleBuffer flush thread");
+  isRunning = false;
   daemon.interrupt();
-  try {
-// Wait for daemon thread to exit
-daemon.join();
-  } catch (InterruptedException e) {
-LOG.error("Interrupted while waiting for daemon to exit.");
-  }
 
   // stop metrics.
   ozoneManagerDoubleBufferMetrics.unRegister();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch revert-1156-HDDS-1830 created (now 74937b8)

2019-07-25 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a change to branch revert-1156-HDDS-1830
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at 74937b8  Revert "HDDS-1830 OzoneManagerDoubleBuffer#stop should wait 
for daemon thread to die (#1156)"

This branch includes the following new commits:

 new 74937b8  Revert "HDDS-1830 OzoneManagerDoubleBuffer#stop should wait 
for daemon thread to die (#1156)"

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1830 OzoneManagerDoubleBuffer#stop should wait for daemon thread to die (#1156)

2019-07-25 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b7fba78  HDDS-1830 OzoneManagerDoubleBuffer#stop should wait for 
daemon thread to die (#1156)
b7fba78 is described below

commit b7fba78fb63a0971835db87292822fd8cd4aa7ad
Author: Siyao Meng <50227127+smen...@users.noreply.github.com>
AuthorDate: Thu Jul 25 16:14:50 2019 -0700

HDDS-1830 OzoneManagerDoubleBuffer#stop should wait for daemon thread to 
die (#1156)
---
 .../ozone/om/ratis/OzoneManagerDoubleBuffer.java | 20 +---
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
index 2bde3ad..e329d5a 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.om.ratis;
 import java.io.IOException;
 import java.util.Queue;
 import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -64,7 +65,7 @@ public class OzoneManagerDoubleBuffer {
   private final OMMetadataManager omMetadataManager;
   private final AtomicLong flushedTransactionCount = new AtomicLong(0);
   private final AtomicLong flushIterations = new AtomicLong(0);
-  private volatile boolean isRunning;
+  private final AtomicBoolean isRunning = new AtomicBoolean(false);
   private OzoneManagerDoubleBufferMetrics ozoneManagerDoubleBufferMetrics;
   private long maxFlushedTransactionsInOneIteration;
 
@@ -79,7 +80,7 @@ public class OzoneManagerDoubleBuffer {
 this.ozoneManagerDoubleBufferMetrics =
 OzoneManagerDoubleBufferMetrics.create();
 
-isRunning = true;
+isRunning.set(true);
 // Daemon thread which runs in back ground and flushes transactions to DB.
 daemon = new Daemon(this::flushTransactions);
 daemon.setName("OMDoubleBufferFlushThread");
@@ -92,7 +93,7 @@ public class OzoneManagerDoubleBuffer {
* and commit to DB.
*/
   private void flushTransactions() {
-while(isRunning) {
+while (isRunning.get()) {
   try {
 if (canFlush()) {
   setReadyBuffer();
@@ -140,7 +141,7 @@ public class OzoneManagerDoubleBuffer {
 }
   } catch (InterruptedException ex) {
 Thread.currentThread().interrupt();
-if (isRunning) {
+if (isRunning.get()) {
   final String message = "OMDoubleBuffer flush thread " +
   Thread.currentThread().getName() + " encountered Interrupted " +
   "exception while running";
@@ -201,11 +202,16 @@ public class OzoneManagerDoubleBuffer {
   /**
* Stop OM DoubleBuffer flush thread.
*/
-  public synchronized void stop() {
-if (isRunning) {
+  public void stop() {
+if (isRunning.compareAndSet(true, false)) {
   LOG.info("Stopping OMDoubleBuffer flush thread");
-  isRunning = false;
   daemon.interrupt();
+  try {
+// Wait for daemon thread to exit
+daemon.join();
+  } catch (InterruptedException e) {
+LOG.error("Interrupted while waiting for daemon to exit.");
+  }
 
   // stop metrics.
   ozoneManagerDoubleBufferMetrics.unRegister();
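
Two concurrency points in this patch, which the revert earlier in this digest undoes: AtomicBoolean.compareAndSet(true, false) makes stop() idempotent without a lock (only the first caller wins the transition), and daemon.join() guarantees the flush thread has actually exited before stop() returns, which the volatile-flag version could not promise. A generic sketch of the same shutdown pattern — StoppableWorker and doWork are illustrative names:

import java.util.concurrent.atomic.AtomicBoolean;

public class StoppableWorker {

  private final AtomicBoolean isRunning = new AtomicBoolean(false);
  private Thread worker;

  public void start() {
    isRunning.set(true);
    worker = new Thread(() -> {
      while (isRunning.get()) {
        try {
          doWork();
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt(); // re-assert interrupt status
        }
      }
    }, "worker");
    worker.setDaemon(true);
    worker.start();
  }

  public void stop() {
    // Only the first caller flips true -> false; later calls are no-ops.
    if (isRunning.compareAndSet(true, false)) {
      worker.interrupt();
      try {
        worker.join(); // return only after the worker has really exited
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
  }

  private void doWork() throws InterruptedException {
    Thread.sleep(10); // placeholder for flushing a batch to the DB
  }
}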


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4.1 updated: HDDS-1850. ReplicationManager should consider inflight replication and deletion while picking datanode for re-replication.

2019-07-25 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 9a5ebec  HDDS-1850. ReplicationManager should consider inflight 
replication and deletion while picking datanode for re-replication.
9a5ebec is described below

commit 9a5ebecf0457b02eb30a6f32132d70e7eaac2579
Author: Nanda kumar 
AuthorDate: Thu Jul 25 15:07:07 2019 +0530

HDDS-1850. ReplicationManager should consider inflight replication and 
deletion while picking datanode for re-replication.

Signed-off-by: Anu Engineer 
(cherry picked from commit 2b1d8aedbb669cf412465bf7a5762c8aeda52faa)
---
 .../org/apache/hadoop/hdds/scm/container/ReplicationManager.java  | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
index 33bf931..0557ea1 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
@@ -483,8 +483,14 @@ public class ReplicationManager {
 final int replicationFactor = container
 .getReplicationFactor().getNumber();
 final int delta = replicationFactor - getReplicaCount(id, replicas);
+final List<DatanodeDetails> excludeList = replicas.stream()
+.map(ContainerReplica::getDatanodeDetails)
+.collect(Collectors.toList());
+inflightReplication.get(id).stream().map(r -> r.datanode)
+.forEach(excludeList::add);
 final List<DatanodeDetails> selectedDatanodes = containerPlacement
-.chooseDatanodes(source, null, delta, container.getUsedBytes());
+.chooseDatanodes(excludeList, null, delta,
+container.getUsedBytes());
 
 LOG.info("Container {} is under replicated. Expected replica count" +
 " is {}, but found {}.", id, replicationFactor,

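
The fix replaces the single source argument with a real exclude list: datanodes that already hold a replica, plus datanodes with a replication already in flight, so the placement policy cannot hand back a node that is about to receive a copy anyway. A simplified sketch of the list construction — Node stands in for DatanodeDetails, and the two input lists for the replica set and the inflightReplication entry:

import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

public final class ExcludeListSketch {

  static final class Node {
    final String id;
    Node(String id) { this.id = id; }
  }

  static List<Node> buildExcludeList(List<Node> currentReplicas,
      List<Node> inflightTargets) {
    // Start with every node that already holds a replica...
    List<Node> exclude = currentReplicas.stream()
        .collect(Collectors.toCollection(ArrayList::new));
    // ...then add nodes already receiving one, mirroring the stream
    // over inflightReplication in the patch.
    inflightTargets.forEach(exclude::add);
    return exclude;
  }
}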

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1850. ReplicationManager should consider inflight replication and deletion while picking datanode for re-replication.

2019-07-25 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2b1d8ae  HDDS-1850. ReplicationManager should consider inflight 
replication and deletion while picking datanode for re-replication.
2b1d8ae is described below

commit 2b1d8aedbb669cf412465bf7a5762c8aeda52faa
Author: Nanda kumar 
AuthorDate: Thu Jul 25 15:07:07 2019 +0530

HDDS-1850. ReplicationManager should consider inflight replication and 
deletion while picking datanode for re-replication.

Signed-off-by: Anu Engineer 
---
 .../org/apache/hadoop/hdds/scm/container/ReplicationManager.java  | 8 +++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
index 33bf931..0557ea1 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
@@ -483,8 +483,14 @@ public class ReplicationManager {
 final int replicationFactor = container
 .getReplicationFactor().getNumber();
 final int delta = replicationFactor - getReplicaCount(id, replicas);
+final List<DatanodeDetails> excludeList = replicas.stream()
+.map(ContainerReplica::getDatanodeDetails)
+.collect(Collectors.toList());
+inflightReplication.get(id).stream().map(r -> r.datanode)
+.forEach(excludeList::add);
 final List<DatanodeDetails> selectedDatanodes = containerPlacement
-.chooseDatanodes(source, null, delta, container.getUsedBytes());
+.chooseDatanodes(excludeList, null, delta,
+container.getUsedBytes());
 
 LOG.info("Container {} is under replicated. Expected replica count" +
 " is {}, but found {}.", id, replicationFactor,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 01/01: Merge branch 'trunk' into ozone-0.4.1

2019-07-25 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 20b2cd669527b152a2750c86d0ed9c6cc6a31846
Merge: 0b92dec b41ef61
Author: Anu Engineer 
AuthorDate: Thu Jul 25 14:08:41 2019 -0700

Merge branch 'trunk' into ozone-0.4.1

 .../org/apache/hadoop/fs/shell/AclCommands.java|   6 +-
 .../main/java/org/apache/hadoop/io/IOUtils.java|   2 -
 .../src/main/resources/core-default.xml|   4 +-
 .../src/site/markdown/FileSystemShell.md   |   2 +-
 .../src/site/markdown/filesystem/filesystem.md |  44 ++-
 .../filesystem/fsdatainputstreambuilder.md |   2 +-
 .../AbstractContractRootDirectoryTest.java |   2 +-
 .../hadoop/fs/contract/ContractTestUtils.java  |  30 +-
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java  |   4 +
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |   1 +
 .../common/src/main/resources/ozone-default.xml|   8 +
 .../hadoop/hdds/scm/cli/TopologySubcommand.java|  60 +++-
 .../server/federation/router/FederationUtil.java   |  13 +-
 .../federation/router/TestFederationUtil.java  |  69 
 .../hdfs/server/namenode/FSImageFormatPBINode.java |   4 +-
 .../hdfs/server/namenode/INodeDirectory.java   |  16 +
 .../hadoop/hdfs/server/namenode/NameNode.java  |   4 +
 .../hadoop/hdfs/server/common/TestJspHelper.java   |  34 ++
 .../test/java/org/apache/hadoop/hdfs/NNBench.java  |   2 +-
 .../main/java/org/apache/hadoop/ozone/OmUtils.java |   1 -
 .../org/apache/hadoop/ozone/audit/OMAction.java|   2 +
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |   3 +
 .../hadoop/ozone/om/exceptions/OMException.java|   3 +-
 .../ozone/om/protocol/OzoneManagerHAProtocol.java  |  18 +-
 .../src/main/proto/OzoneManagerProtocol.proto  |   9 +-
 .../dist/src/main/compose/ozone-topology/test.sh   |   2 +
 .../dist/src/main/smoketest/topology/scmcli.robot  |  32 ++
 .../org/apache/hadoop/ozone/MiniOzoneCluster.java  |   7 +
 .../hadoop/ozone/MiniOzoneHAClusterImpl.java   |  54 ++-
 .../ozoneimpl/TestSecureOzoneContainer.java|   2 +
 .../server/TestSecureContainerServer.java  |   2 +
 .../hadoop/ozone/om/TestOMRatisSnapshots.java  | 189 ++
 .../apache/hadoop/ozone/om/TestOzoneManagerHA.java |   7 +-
 .../org/apache/hadoop/ozone/om/KeyManager.java |  11 -
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  10 +-
 .../hadoop/ozone/om/OMDBCheckpointServlet.java |   2 +-
 .../java/org/apache/hadoop/ozone/om/OMMetrics.java |  22 +-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   | 381 +++--
 .../ozone/om/ratis/OzoneManagerDoubleBuffer.java   |   3 +
 .../ozone/om/ratis/OzoneManagerRatisServer.java|  15 +-
 .../ozone/om/ratis/OzoneManagerStateMachine.java   | 130 +++
 .../om/ratis/utils/OzoneManagerRatisUtils.java |  12 +
 .../request/s3/bucket/S3BucketDeleteRequest.java   | 193 +++
 .../S3InitiateMultipartUploadRequest.java  | 213 
 .../multipart/S3MultipartUploadAbortRequest.java   | 173 ++
 .../S3MultipartUploadCommitPartRequest.java| 217 
 .../om/request/s3/multipart/package-info.java} |  21 +-
 .../om/request/volume/OMVolumeSetOwnerRequest.java |   4 +-
 .../om/request/volume/OMVolumeSetQuotaRequest.java |   3 +-
 .../response/s3/bucket/S3BucketCreateResponse.java |   8 +-
 ...teResponse.java => S3BucketDeleteResponse.java} |  54 +--
 .../S3InitiateMultipartUploadResponse.java |  80 +
 .../multipart/S3MultipartUploadAbortResponse.java  |  83 +
 .../S3MultipartUploadCommitPartResponse.java   | 109 ++
 .../om/response/s3/multipart/package-info.java}|  22 +-
 .../om/snapshot/OzoneManagerSnapshotProvider.java  |   2 +-
 .../OzoneManagerHARequestHandlerImpl.java  |  18 +-
 .../protocolPB/OzoneManagerRequestHandler.java |  35 --
 .../ozone/om/request/TestOMRequestUtils.java   |  85 +
 .../s3/bucket/TestS3BucketDeleteRequest.java   | 167 +
 .../TestS3InitiateMultipartUploadRequest.java  | 153 +
 .../s3/multipart/TestS3MultipartRequest.java   | 178 ++
 .../TestS3MultipartUploadAbortRequest.java | 158 +
 .../TestS3MultipartUploadCommitPartRequest.java| 209 +++
 .../s3/multipart/package-info.java}|  20 +-
 .../ozone/om/response/TestOMResponseUtils.java |  42 +++
 .../s3/bucket/TestS3BucketCreateResponse.java  |  41 +--
 .../s3/bucket/TestS3BucketDeleteResponse.java  |  73 
 .../TestS3InitiateMultipartUploadResponse.java |  63 
 .../s3/multipart/TestS3MultipartResponse.java  | 143 
 .../TestS3MultipartUploadAbortResponse.java| 129 +++
 .../multipart/package-info.java}   |  20 +-
 hadoop-project/pom.xml |   3 +-
 .../java/org/apache/hadoop/fs/s3a/Constants.java   |   4 

[hadoop] branch ozone-0.4.1 updated (0b92dec -> 20b2cd6)

2019-07-25 Thread aengineer
This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a change to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 0b92dec  HDDS-1858. mTLS support for Ozone is not correct. Contributed 
by Siddharth Wagle.
 add d545f9c  HADOOP-16437 documentation typo fix: 
fs.s3a.experimental.input.fadvise
 add 4e66cb9  HDDS-1653. Add option to "ozone scmcli printTopology" to 
order the output acccording to topology layer. Contributed by Xiaoyu Yao.  
(#1067)
 add 6282c02  HDFS-14577. RBF: FederationUtil#newInstance should allow 
constructor without context. Contributed by CR Hota.
 add cd967c7  HADOOP-15847. S3Guard testConcurrentTableCreations to set R/W 
capacity == 0
 add 7f1b76c  HADOOP-13868. [s3a] New default for S3A multi-part 
configuration (#1125)
 add 69a46a9  HDDS-1713. ReplicationManager fail to find proper node 
topology based… (#1112)
 add f5b2f75  HDDS-1795. Implement S3 Delete Bucket request to use Cache 
and DoubleBuffer. (#1097)
 add 0b45293  HADOOP-16404. ABFS default blocksize change(256MB from 512MB)
 add e60f5e2  HADOOP-16440. Distcp can not preserve timestamp with -delete 
option. Contributed by ludun.
 add 4aa76e3  HDDS-1805. Implement S3 Initiate MPU request to use Cache and 
DoubleBuffer. (#1108)
 add acdb0a1  HDDS-1841. Fix TestSecureContainerServer. (#1136)
 add 340bbaf  HDDS-1840. Fix TestSecureOzoneContainer. (#1135)
 add cdc36fe  HDDS-1649. On installSnapshot notification from OM leader, 
download checkpoint and reload OM state (#948)
 add c958edd  HDDS-1811. Prometheus metrics are broken.
 add 2ea71d9  HDDS-1686. Remove check to get from openKeyTable in acl 
implementatio… (#966)
 add f3f2f34  HDDS-1585. Add LICENSE.txt and NOTICE.txt to Ozone Recon Web
 add d70ec4b  HDDS-1799. Add goofyfs to the ozone-runner docker image
 add d59f271  HDDS-1803. shellcheck.sh does not work on Mac
 add c533b79  HDDS-1710. Publish JVM metrics via Hadoop metrics 
Signed-off-by: Anu Engineer 
 add ee87e9a  MAPREDUCE-7076. TestNNBench#testNNBenchCreateReadAndDelete 
failing in our internal build
 add 377f95b  HDFS-13693. Remove unnecessary search in 
INodeDirectory.addChild during image loading. Contributed by Lisheng Sun.
 add eb36b09  HADOOP-16443. Improve help text for setfacl --set option.
 add 4317d33  HADOOP-16380. S3Guard to determine empty directory status for 
all non-root directories.
 add 278390f  HDDS-1710. Fix checkstyle errors
 add fbe2fc6  HDDS-1846. Default value for checksum bytes is different in 
ozone-site.xml and code.
 add ecb9f81  HDDS-1845. Fix 
OMVolumeSetQuota|OwnerRequest#validateAndUpdateCache return response. (#1141)
 add a1251ad  HADOOP-16431. Remove useless log in IOUtils.java and 
ExceptionDiags.java.
 add cf9ff08  HDDS-1848. Fix TestOzoneManagerHA and 
TestOzoneManagerSnapShotProvider. (#1148)
 add 7b21977  HADOOP-16433. S3Guard: Filter expired entries and tombstones 
when listing with MetadataStore.listChildren().
 add cb69700  HDDS-1853. Fix failing blockade test-cases. (#1151)
 add 1d98a21  HDDS-1819. Implement S3 Commit MPU request to use Cache and 
DoubleBuffer. (#1140)
 add 2546e6e  HDDS-1817. GetKey fails with IllegalArgumentException.
 add 9b8b3ac  HADOOP-16451. Update jackson-databind to 2.9.9.1. Contributed 
by Siyao Meng.
 add 62deab1  HDFS-14647. NPE during secure namenode startup. Contributed 
by Fengnan Li.
 add ba43233  HDDS-1749 : Ozone Client should randomize the list of nodes 
in pipeline for reads. (#1124)
 add 3c4159f  HDDS-1842. Implement S3 Abort MPU request to use Cache and 
DoubleBuffer. (#1155)
 add b41ef61  HDDS-1858. mTLS support for Ozone is not correct. Contributed 
by Siddharth Wagle.
 new 20b2cd6  Merge branch 'trunk' into ozone-0.4.1

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../org/apache/hadoop/fs/shell/AclCommands.java|   6 +-
 .../main/java/org/apache/hadoop/io/IOUtils.java|   2 -
 .../src/main/resources/core-default.xml|   4 +-
 .../src/site/markdown/FileSystemShell.md   |   2 +-
 .../src/site/markdown/filesystem/filesystem.md |  44 ++-
 .../filesystem/fsdatainputstreambuilder.md |   2 +-
 .../AbstractContractRootDirectoryTest.java |   2 +-
 .../hadoop/fs/contract/ContractTestUtils.java  |  30 +-
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java  |   4 +
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |   1 +
 .../common/src/main/resources/ozone-default.xml|   8 +
 .../hadoop/hdds/scm/cli/TopologySubcommand.java|  60 +++-
 .../server/federation/router/FederationUtil.java   |  13 +-
 .../federation/router/TestFederationUtil.java  |  69 
 

[hadoop] 02/50: HDFS-12977. [SBN read] Add stateId to RPC headers. Contributed by Plamen Jeliazkov.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit ccad9ce59bb85c4ec5adcad10bb77e2172f2b1ad
Author: Plamen Jeliazkov 
AuthorDate: Tue Mar 20 18:48:40 2018 -0700

HDFS-12977. [SBN read] Add stateId to RPC headers. Contributed by Plamen 
Jeliazkov.
---
 .../org/apache/hadoop/ipc/AlignmentContext.java|  51 
 .../main/java/org/apache/hadoop/ipc/Client.java|   9 ++
 .../org/apache/hadoop/ipc/ProtobufRpcEngine.java   |   9 +-
 .../src/main/java/org/apache/hadoop/ipc/RPC.java   |  10 +-
 .../main/java/org/apache/hadoop/ipc/RpcEngine.java |   5 +-
 .../main/java/org/apache/hadoop/ipc/Server.java|  21 +++-
 .../org/apache/hadoop/ipc/WritableRpcEngine.java   |  33 +-
 .../hadoop-common/src/main/proto/RpcHeader.proto   |   1 +
 .../test/java/org/apache/hadoop/ipc/TestRPC.java   |   3 +-
 .../org/apache/hadoop/hdfs/ClientGCIContext.java   |  65 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java |   3 +
 .../hdfs/server/namenode/GlobalStateIdContext.java |  59 ++
 .../hdfs/server/namenode/NameNodeRpcServer.java|   1 +
 .../hadoop/hdfs/TestStateAlignmentContext.java | 131 +
 14 files changed, 389 insertions(+), 12 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
new file mode 100644
index 000..f952325
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
+
+/**
+ * This interface intends to align the state between client and server
+ * via RPC communication.
+ *
+ * This should be implemented separately on the client side and server side
+ * and can be used to pass state information on RPC responses from server
+ * to client.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public interface AlignmentContext {
+
+  /**
+   * This is the intended server method call to implement to pass state info
+   * during RPC response header construction.
+   * @param header The RPC response header builder.
+   */
+  void updateResponseState(RpcResponseHeaderProto.Builder header);
+
+  /**
+   * This is the intended client method call to implement to recieve state info
+   * during RPC response processing.
+   * @param header The RPC response header.
+   */
+  void receiveResponseState(RpcResponseHeaderProto header);
+
+}
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 9ee0647..bb09799 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -103,6 +103,12 @@ public class Client implements AutoCloseable {
   return false;
 }
   };
+  private static AlignmentContext alignmentContext;
+
+  /** Set alignment context to use to fetch state alignment info from RPC. */
+  public static void setAlignmentContext(AlignmentContext ac) {
+alignmentContext = ac;
+  }
 
   @SuppressWarnings("unchecked")
   @Unstable
@@ -1184,6 +1190,9 @@ public class Client implements AutoCloseable {
   final Call call = calls.remove(callId);
   call.setRpcResponse(value);
 }
+if (alignmentContext != null) {
+  alignmentContext.receiveResponseState(header);
+}
 // verify that packet length was correct
 if (packet.remaining() > 0) {
   throw new RpcClientException("RPC response length mismatch");
diff --git 
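
The new AlignmentContext interface above is the whole extension point of this change: the server-side implementation stamps its latest state ID into every RPC response header, and the client-side implementation records what it sees. A hedged sketch of a client-side implementation, assuming the one-line RpcHeader.proto change adds an int64 field named stateId (the proto hunk is not shown here); LongAccumulator with Math::max keeps the largest ID seen across concurrent responses:

import java.util.concurrent.atomic.LongAccumulator;

import org.apache.hadoop.ipc.AlignmentContext;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;

public class ClientStateTracker implements AlignmentContext {

  // Monotonically tracks the highest stateId observed; safe under
  // concurrent RPC response threads.
  private final LongAccumulator lastSeenStateId =
      new LongAccumulator(Math::max, Long.MIN_VALUE);

  @Override
  public void updateResponseState(RpcResponseHeaderProto.Builder header) {
    // Server-side hook; a pure client has nothing to attach here.
  }

  @Override
  public void receiveResponseState(RpcResponseHeaderProto header) {
    lastSeenStateId.accumulate(header.getStateId());
  }

  public long getLastSeenStateId() {
    return lastSeenStateId.get();
  }
}
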

[hadoop] 33/50: HDFS-14094. [SBN read] Fix the order of logging arguments in ObserverReadProxyProvider. Contributed by Ayush Saxena.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 54a1c66e4842efd2f501bc9bd68909e556fd815c
Author: Konstantin V Shvachko 
AuthorDate: Sun Nov 25 12:46:28 2018 -0800

HDFS-14094. [SBN read] Fix the order of logging arguments in 
ObserverReadProxyProvider. Contributed by Ayush Saxena.
---
 .../hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
index 1e85a8e..5d56c59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -302,7 +302,7 @@ public class ObserverReadProxyProvider
 // If we get here, it means all observers have failed.
 LOG.warn("{} observers have failed for read request {}; also found " +
 "{} standby and {} active. Falling back to active.",
-failedObserverCount, standbyCount, activeCount, method.getName());
+failedObserverCount, method.getName(), standbyCount, activeCount);
   }
 
   // Either all observers have failed, or that it is a write request.
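
The bug fixed here is purely positional: SLF4J binds arguments to {} placeholders strictly in order, and since every argument is an Object the compiler cannot flag a wrong ordering. A small sketch contrasting the broken and corrected calls with made-up values:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PlaceholderOrderSketch {

  private static final Logger LOG =
      LoggerFactory.getLogger(PlaceholderOrderSketch.class);

  public static void main(String[] args) {
    int failed = 3, standby = 1, active = 1;
    String method = "getBlockLocations";

    // Broken ordering: logs "... read request 1; also found 1 standby
    // and getBlockLocations active." -- counts land under wrong slots.
    LOG.warn("{} observers have failed for read request {}; also found " +
        "{} standby and {} active.", failed, standby, active, method);

    // Fixed ordering: every argument lines up with its placeholder.
    LOG.warn("{} observers have failed for read request {}; also found " +
        "{} standby and {} active.", failed, method, standby, active);
  }
}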


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 42/50: HDFS-14160. [SBN read] ObserverReadInvocationHandler should implement RpcInvocationHandler. Contributed by Konstantin V Shvachko.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 451b8b8f1675366de3f17762385867eda307f904
Author: Konstantin V Shvachko 
AuthorDate: Wed Dec 19 12:39:57 2018 -0800

HDFS-14160. [SBN read] ObserverReadInvocationHandler should implement 
RpcInvocationHandler. Contributed by Konstantin V Shvachko.
---
 .../hdfs/server/namenode/ha/ObserverReadProxyProvider.java  | 13 +++--
 .../org/apache/hadoop/hdfs/server/namenode/TestFsck.java|  2 +-
 .../apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java   | 10 +++---
 .../hadoop/hdfs/server/namenode/ha/TestObserverNode.java| 13 +
 4 files changed, 32 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
index e9d53f6..96932a7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import java.io.Closeable;
 import java.io.IOException;
-import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
@@ -39,9 +38,11 @@ import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
 import org.apache.hadoop.ipc.AlignmentContext;
+import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.ObserverRetryOnActiveException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RpcInvocationHandler;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -239,7 +240,7 @@ public class ObserverReadProxyProvider
*
* Write requests are always forwarded to the active.
*/
-  private class ObserverReadInvocationHandler implements InvocationHandler {
+  private class ObserverReadInvocationHandler implements RpcInvocationHandler {
 
 @Override
 public Object invoke(Object proxy, final Method method, final Object[] 
args)
@@ -322,6 +323,14 @@ public class ObserverReadProxyProvider
   lastProxy = activeProxy;
   return retVal;
 }
+
+@Override
+public void close() throws IOException {}
+
+@Override
+public ConnectionId getConnectionId() {
+  return RPC.getConnectionIdForProxy(getCurrentProxy().proxy);
+}
   }
 
   @Override
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 2b5d762..3ef8587 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -166,7 +166,7 @@ public class TestFsck {
   private static final String LINE_SEPARATOR =
   System.getProperty("line.separator");
 
-  static String runFsck(Configuration conf, int expectedErrCode, 
+  public static String runFsck(Configuration conf, int expectedErrCode,
 boolean checkErrorCode, String... path)
 throws Exception {
 ByteArrayOutputStream bStream = new ByteArrayOutputStream();
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
index 1e9418b..16aa924 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
@@ -175,10 +175,14 @@ public abstract class HATestUtil {
   MiniDFSCluster cluster, Configuration conf,
   Class classFPP, boolean isObserverReadEnabled)
   throws IOException, URISyntaxException {
-conf = new Configuration(conf);
-setupHAConfiguration(cluster, conf, 0, classFPP);
+String logicalName = conf.get(DFSConfigKeys.DFS_NAMESERVICES);
+URI nnUri = new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + logicalName);
+conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX
++ "." + logicalName, classFPP.getName());
+conf.set("fs.defaultFS", nnUri.toString());
+
 DistributedFileSystem dfs = (DistributedFileSystem)
-

[hadoop] 47/50: HDFS-14272. [SBN read] Make ObserverReadProxyProvider initialize its state ID against the active NN on startup. Contributed by Erik Krogen.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 16a4043f01f0c33ef9066b2f4b45f50ae7a13022
Author: Erik Krogen 
AuthorDate: Fri Mar 1 12:58:55 2019 -0800

HDFS-14272. [SBN read] Make ObserverReadProxyProvider initialize its state 
ID against the active NN on startup. Contributed by Erik Krogen.
---
 .../namenode/ha/ObserverReadProxyProvider.java | 34 ++
 .../hdfs/server/namenode/NameNodeRpcServer.java|  3 +-
 .../namenode/ha/TestConsistentReadsObserver.java   | 73 +-
 3 files changed, 107 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
index 356600f..fe867c5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -88,6 +88,15 @@ public class ObserverReadProxyProvider
   private boolean observerReadEnabled;
 
   /**
+   * A client using an ObserverReadProxyProvider should first sync with the
+   * active NameNode on startup. This ensures that the client reads data which
+   * is consistent with the state of the world as of the time of its
+   * instantiation. This variable will be true after this initial sync has
+   * been performed.
+   */
+  private volatile boolean msynced = false;
+
+  /**
* The index into the nameNodeProxies list currently being used. Should only
* be accessed in synchronized methods.
*/
@@ -225,6 +234,22 @@ public class ObserverReadProxyProvider
   }
 
   /**
+   * This will call {@link ClientProtocol#msync()} on the active NameNode
+   * (via the {@link #failoverProxy}) to initialize the state of this client.
+   * Calling it multiple times is a no-op; only the first will perform an
+   * msync.
+   *
+   * @see #msynced
+   */
+  private synchronized void initializeMsync() throws IOException {
+if (msynced) {
+  return; // No need for an msync
+}
+failoverProxy.getProxy().proxy.msync();
+msynced = true;
+  }
+
+  /**
* An InvocationHandler to handle incoming requests. This class's invoke
* method contains the primary logic for redirecting to observers.
*
@@ -244,6 +269,12 @@ public class ObserverReadProxyProvider
   Object retVal;
 
   if (observerReadEnabled && isRead(method)) {
+if (!msynced) {
+  // An msync() must first be performed to ensure that this client is
+  // up-to-date with the active's state. This will only be done once.
+  initializeMsync();
+}
+
 int failedObserverCount = 0;
 int activeCount = 0;
 int standbyCount = 0;
@@ -315,6 +346,9 @@ public class ObserverReadProxyProvider
 // This exception will be handled by higher layers
 throw e.getCause();
   }
+  // If this was reached, the request reached the active, so the
+  // state is up-to-date with active and no further msync is needed.
+  msynced = true;
   lastProxy = activeProxy;
   return retVal;
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 644a480..a06e763 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1342,7 +1342,8 @@ public class NameNodeRpcServer implements 
NamenodeProtocols {
 
   @Override // ClientProtocol
   public void msync() throws IOException {
-// TODO : need to be filled up if needed. May be a no-op here.
+// Check for write access to ensure that msync only happens on active
+namesystem.checkOperation(OperationCategory.WRITE);
   }
 
   @Override // ClientProtocol
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
index 2845670..2bed37c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
@@ -178,8 +178,12 @@ public class TestConsistentReadsObserver {
 // Therefore, the subsequent 

[hadoop] 39/50: HDFS-14146. [SBN read] Handle exceptions from and prevent handler threads from blocking within internalQueueCall. Contributed by Chao Sun.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 8b8ec65e65075d60e97f7344935c85c645793378
Author: Erik Krogen 
AuthorDate: Fri Dec 14 14:02:20 2018 -0800

HDFS-14146. [SBN read] Handle exceptions from and prevent handler threads 
from blocking within internalQueueCall. Contributed by Chao Sun.
---
 .../java/org/apache/hadoop/ipc/ExternalCall.java   |  3 +-
 .../main/java/org/apache/hadoop/ipc/Server.java| 39 +++---
 .../namenode/ha/TestConsistentReadsObserver.java   | 63 ++
 3 files changed, 97 insertions(+), 8 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
index 5566136..5cc3665 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ExternalCall.java
@@ -24,6 +24,7 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.hadoop.ipc.Server.Call;
+import 
org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;
 import org.apache.hadoop.security.UserGroupInformation;
 
 public abstract class ExternalCall extends Call {
@@ -78,7 +79,7 @@ public abstract class ExternalCall extends Call {
   }
 
   @Override
-  final void doResponse(Throwable t) {
+  final void doResponse(Throwable t, RpcStatusProto status) {
 synchronized(done) {
   error = t;
   done.set(true);
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index c684314..92d850d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -800,7 +800,11 @@ public abstract class Server {
   }
 }
 
-void doResponse(Throwable t) throws IOException {}
+void doResponse(Throwable t) throws IOException {
+  doResponse(t, RpcStatusProto.FATAL);
+}
+
+void doResponse(Throwable t, RpcStatusProto proto) throws IOException {}
 
 // For Schedulable
 @Override
@@ -967,15 +971,17 @@ public abstract class Server {
 }
 
 @Override
-void doResponse(Throwable t) throws IOException {
+void doResponse(Throwable t, RpcStatusProto status) throws IOException {
   RpcCall call = this;
   if (t != null) {
+if (status == null) {
+  status = RpcStatusProto.FATAL;
+}
 // clone the call to prevent a race with another thread stomping
 // on the response while being sent.  the original call is
 // effectively discarded since the wait count won't hit zero
 call = new RpcCall(this);
-setupResponse(call,
-RpcStatusProto.FATAL, RpcErrorCodeProto.ERROR_RPC_SERVER,
+setupResponse(call, status, RpcErrorCodeProto.ERROR_RPC_SERVER,
 null, t.getClass().getName(), StringUtils.stringifyException(t));
   } else {
 setupResponse(call, call.responseParams.returnStatus,
@@ -2707,8 +2713,18 @@ public abstract class Server {
 
   private void internalQueueCall(Call call)
   throws IOException, InterruptedException {
+internalQueueCall(call, true);
+  }
+
+  private void internalQueueCall(Call call, boolean blocking)
+  throws IOException, InterruptedException {
 try {
-  callQueue.put(call); // queue the call; maybe blocked here
+  // queue the call, may be blocked if blocking is true.
+  if (blocking) {
+callQueue.put(call);
+  } else {
+callQueue.add(call);
+  }
 } catch (CallQueueOverflowException cqe) {
   // If rpc scheduler indicates back off based on performance degradation
   // such as response time or rpc queue is full, we will ask the client
@@ -2751,8 +2767,8 @@ public abstract class Server {
  * In case of Observer, it handles only reads, which are
  * commutative.
  */
-//Re-queue the call and continue
-internalQueueCall(call);
+// Re-queue the call and continue
+requeueCall(call);
 continue;
   }
   if (LOG.isDebugEnabled()) {
@@ -2794,6 +2810,15 @@ public abstract class Server {
   LOG.debug(Thread.currentThread().getName() + ": exiting");
 }
 
+private void requeueCall(Call call)
+throws IOException, InterruptedException {
+  try {
+internalQueueCall(call, false);
+  } catch (RpcServerException rse) {
+call.doResponse(rse.getCause(), rse.getRpcStatusProto());
+  }
+}
+
   }
 
  

[hadoop] 48/50: HDFS-14211. [SBN Read]. Add a configurable flag to enable always-msync mode to ObserverReadProxyProvider. Contributed by Erik Krogen.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 11fee2d4e1717f6d06a6186620186b21201cdec5
Author: Erik Krogen 
AuthorDate: Fri Mar 8 10:35:31 2019 -0800

HDFS-14211. [SBN Read]. Add a configurable flag to enable always-msync mode 
to ObserverReadProxyProvider. Contributed by Erik Krogen.
---
 .../namenode/ha/ObserverReadProxyProvider.java | 64 ++
 .../src/site/markdown/ObserverNameNode.md  | 48 +++-
 .../namenode/ha/TestConsistentReadsObserver.java   | 56 +++
 3 files changed, 156 insertions(+), 12 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
index fe867c5..31c2ddf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -23,6 +23,7 @@ import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.net.URI;
+import java.util.concurrent.TimeUnit;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -43,6 +44,7 @@ import org.apache.hadoop.ipc.ObserverRetryOnActiveException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RpcInvocationHandler;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -68,6 +70,12 @@ public class ObserverReadProxyProvider
   private static final Logger LOG = LoggerFactory.getLogger(
   ObserverReadProxyProvider.class);
 
+  /** Configuration key for {@link #autoMsyncPeriodMs}. */
+  static final String AUTO_MSYNC_PERIOD_KEY_PREFIX =
+  HdfsClientConfigKeys.Failover.PREFIX + "observer.auto-msync-period";
+  /** Auto-msync disabled by default. */
+  static final long AUTO_MSYNC_PERIOD_DEFAULT = -1;
+
   /** Client-side context for syncing with the NameNode server side. */
   private final AlignmentContext alignmentContext;
 
@@ -88,6 +96,24 @@ public class ObserverReadProxyProvider
   private boolean observerReadEnabled;
 
   /**
+   * This adjusts how frequently this proxy provider should auto-msync to the
+   * Active NameNode, automatically performing an msync() call to the active
+   * to fetch the current transaction ID before submitting read requests to
+   * observer nodes. See HDFS-14211 for more description of this feature.
+   * If this is below 0, never auto-msync. If this is 0, perform an msync on
+   * every read operation. If this is above 0, perform an msync after this many
+   * ms have elapsed since the last msync.
+   */
+  private final long autoMsyncPeriodMs;
+
+  /**
+   * The time, in millisecond epoch, that the last msync operation was
+   * performed. This includes any implicit msync (any operation which is
+   * serviced by the Active NameNode).
+   */
+  private volatile long lastMsyncTimeMs = -1;
+
+  /**
* A client using an ObserverReadProxyProvider should first sync with the
* active NameNode on startup. This ensures that the client reads data which
* is consistent with the state of the world as of the time of its
@@ -154,6 +180,12 @@ public class ObserverReadProxyProvider
 ObserverReadInvocationHandler.class.getClassLoader(),
 new Class[] {xface}, new ObserverReadInvocationHandler());
 combinedProxy = new ProxyInfo<>(wrappedProxy, combinedInfo.toString());
+
+autoMsyncPeriodMs = conf.getTimeDuration(
+// The host of the URI is the nameservice ID
+AUTO_MSYNC_PERIOD_KEY_PREFIX + "." + uri.getHost(),
+AUTO_MSYNC_PERIOD_DEFAULT, TimeUnit.MILLISECONDS);
+
 // TODO : make this configurable or remove this variable
 this.observerReadEnabled = true;
   }
@@ -247,6 +279,35 @@ public class ObserverReadProxyProvider
 }
 failoverProxy.getProxy().proxy.msync();
 msynced = true;
+lastMsyncTimeMs = Time.monotonicNow();
+  }
+
+  /**
+   * This will call {@link ClientProtocol#msync()} on the active NameNode
+   * (via the {@link #failoverProxy}) to update the state of this client, only
+   * if at least {@link #autoMsyncPeriodMs} ms has elapsed since the last time
+   * an msync was performed.
+   *
+   * @see #autoMsyncPeriodMs
+   */
+  private void autoMsyncIfNecessary() throws IOException {
+if (autoMsyncPeriodMs == 0) {
+  // Always msync
+  failoverProxy.getProxy().proxy.msync();
+} else if (autoMsyncPeriodMs > 0) {
+  if (Time.monotonicNow() - lastMsyncTimeMs > autoMsyncPeriodMs) {
+

[hadoop] 44/50: HDFS-14170. [SBN read] Fix checkstyle warnings related to SBN reads. Contributed by Konstantin V Shvachko.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 380668559f9b99df2bc3a922d8d644b0f9c2f049
Author: Konstantin V Shvachko 
AuthorDate: Mon Dec 24 09:39:20 2018 -0800

HDFS-14170. [SBN read] Fix checkstyle warnings related to SBN reads. 
Contributed by Konstantin V Shvachko.
---
 .../hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java  | 2 +-
 .../hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java   | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 85251e1..9287b5a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -589,7 +589,7 @@ public class RPC {
 
   /**
* Get a protocol proxy that contains a proxy connection to a remote server
-   * and a set of methods that are supported by the server
+   * and a set of methods that are supported by the server.
*
* @param protocol protocol
* @param clientVersion client's version
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
index 96932a7..7540508 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -110,7 +110,7 @@ public class ObserverReadProxyProvider
   public ObserverReadProxyProvider(
   Configuration conf, URI uri, Class xface, HAProxyFactory factory) {
 this(conf, uri, xface, factory,
-new ConfiguredFailoverProxyProvider<>(conf, uri, xface,factory));
+new ConfiguredFailoverProxyProvider<>(conf, uri, xface, factory));
   }
 
   @SuppressWarnings("unchecked")
@@ -144,7 +144,7 @@ public class ObserverReadProxyProvider
 combinedInfo.append(']');
 T wrappedProxy = (T) Proxy.newProxyInstance(
 ObserverReadInvocationHandler.class.getClassLoader(),
-new Class[] { xface }, new ObserverReadInvocationHandler());
+new Class[] {xface}, new ObserverReadInvocationHandler());
 combinedProxy = new ProxyInfo<>(wrappedProxy, combinedInfo.toString());
 // TODO : make this configurable or remove this variable
 this.observerReadEnabled = true;
@@ -232,7 +232,7 @@ public class ObserverReadProxyProvider
   /**
* An InvocationHandler to handle incoming requests. This class's invoke
* method contains the primary logic for redirecting to observers.
-   * 
+   *
* If observer reads are enabled, attempt to send read operations to the
* current proxy. If it is not an observer, or the observer fails, adjust
* the current proxy and retry on the next one. If all proxies are tried


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 40/50: HDFS-14116. [SBN read] Fix class cast error in NNThroughputBenchmark with ObserverReadProxyProvider. Contributed by Chao Sun.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 6e19a945bc7f980a3c538a743bbe3063494325a1
Author: Chao Sun 
AuthorDate: Mon Dec 17 16:32:54 2018 -0800

HDFS-14116. [SBN read] Fix class cast error in NNThroughputBenchmark with 
ObserverReadProxyProvider. Contributed by Chao Sun.
---
 .../src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java| 12 +---
 .../hadoop/hdfs/server/namenode/NNThroughputBenchmark.java   |  6 --
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 1411a7f..2177301 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -70,7 +70,6 @@ import java.util.Random;
 import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicBoolean;
 
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
@@ -1980,18 +1979,17 @@ public class DFSTestUtil {
* Get the RefreshUserMappingsProtocol RPC proxy for the NN associated with
* this DFSClient object
*
-   * @param nameNodeUri the URI of the NN to get a proxy for.
+   * @param nnAddr the address of the NN to get a proxy for.
*
* @return the RefreshUserMappingsProtocol RPC proxy associated with this
* DFSClient object
*/
   @VisibleForTesting
   public static RefreshUserMappingsProtocol 
getRefreshUserMappingsProtocolProxy(
-  Configuration conf, URI nameNodeUri) throws IOException {
-final AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
-return NameNodeProxies.createProxy(conf,
-nameNodeUri, RefreshUserMappingsProtocol.class,
-nnFallbackToSimpleAuth).getProxy();
+  Configuration conf, InetSocketAddress nnAddr) throws IOException {
+return NameNodeProxies.createNonHAProxy(
+conf, nnAddr, RefreshUserMappingsProtocol.class,
+UserGroupInformation.getCurrentUser(), false).getProxy();
   }
 
   /**
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index c2e05c9..4e0bce8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -1523,10 +1524,11 @@ public class NNThroughputBenchmark implements Tool {
 nameNodeProto = DFSTestUtil.getNamenodeProtocolProxy(config, nnUri,
 UserGroupInformation.getCurrentUser());
 clientProto = dfs.getClient().getNamenode();
+InetSocketAddress nnAddr = DFSUtilClient.getNNAddress(nnUri);
 dataNodeProto = new DatanodeProtocolClientSideTranslatorPB(
-DFSUtilClient.getNNAddress(nnUri), config);
+nnAddr, config);
 refreshUserMappingsProto =
-DFSTestUtil.getRefreshUserMappingsProtocolProxy(config, nnUri);
+DFSTestUtil.getRefreshUserMappingsProtocolProxy(config, nnAddr);
 getBlockPoolId(dfs);
   }
   // run each benchmark


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 37/50: HDFS-13873. [SBN read] ObserverNode should reject read requests when it is too far behind. Contributed by Konstantin Shvachko.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 7b1e3c49d1508a30e6b8507cd5a99732e338e2d9
Author: Erik Krogen 
AuthorDate: Thu Dec 13 14:31:41 2018 -0800

HDFS-13873. [SBN read] ObserverNode should reject read requests when it is 
too far behind. Contributed by Konstantin Shvachko.
---
 .../org/apache/hadoop/ipc/AlignmentContext.java| 12 -
 .../main/java/org/apache/hadoop/ipc/Server.java| 31 -
 .../org/apache/hadoop/hdfs/ClientGSIContext.java   |  7 ++-
 .../hdfs/server/namenode/GlobalStateIdContext.java | 52 +++---
 .../hadoop/hdfs/server/namenode/ha/HATestUtil.java | 20 +
 .../server/namenode/ha/TestMultiObserverNode.java  | 14 ++
 6 files changed, 115 insertions(+), 21 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
index a435ff6..bcddfbf 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ipc;
 
+import java.io.IOException;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto;
@@ -64,9 +66,15 @@ public interface AlignmentContext {
* client state info during RPC response header processing.
*
* @param header The RPC request header.
-   * @return state id of in the request header.
+   * @param threshold a parameter to verify a condition when server
+   *should reject client request due to its state being too far
+   *misaligned with the client state.
+   *See implementation for more details.
+   * @return state id required for the server to execute the call.
+   * @throws IOException
*/
-  long receiveRequestState(RpcRequestHeaderProto header);
+  long receiveRequestState(RpcRequestHeaderProto header, long threshold)
+  throws IOException;
 
   /**
* Returns the last seen state id of the alignment context instance.
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 1cbf8b8..3d49b68 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -2531,6 +2531,7 @@ public abstract class Server {
 
   // Save the priority level assignment by the scheduler
   call.setPriorityLevel(callQueue.getPriorityLevel(call));
+  call.markCallCoordinated(false);
   if(alignmentContext != null && call.rpcRequest != null &&
   (call.rpcRequest instanceof ProtobufRpcEngine.RpcProtobufRequest)) {
 // if call.rpcRequest is not RpcProtobufRequest, will skip the 
following
@@ -2539,23 +2540,21 @@ public abstract class Server {
 // coordinated.
 String methodName;
 String protoName;
+ProtobufRpcEngine.RpcProtobufRequest req =
+(ProtobufRpcEngine.RpcProtobufRequest) call.rpcRequest;
 try {
-  ProtobufRpcEngine.RpcProtobufRequest req =
-  (ProtobufRpcEngine.RpcProtobufRequest) call.rpcRequest;
   methodName = req.getRequestHeader().getMethodName();
   protoName = req.getRequestHeader().getDeclaringClassProtocolName();
+  if (alignmentContext.isCoordinatedCall(protoName, methodName)) {
+call.markCallCoordinated(true);
+long stateId;
+stateId = alignmentContext.receiveRequestState(
+header, getMaxIdleTime());
+call.setClientStateId(stateId);
+  }
 } catch (IOException ioe) {
-  throw new RpcServerException("Rpc request header check fail", ioe);
-}
-if (!alignmentContext.isCoordinatedCall(protoName, methodName)) {
-  call.markCallCoordinated(false);
-} else {
-  call.markCallCoordinated(true);
-  long stateId = alignmentContext.receiveRequestState(header);
-  call.setClientStateId(stateId);
+  throw new RpcServerException("Processing RPC request caught ", ioe);
 }
-  } else {
-call.markCallCoordinated(false);
   }
 
   try {
@@ -3607,4 +3606,12 @@ public abstract class Server {
   idleScanTimer.schedule(idleScanTask, idleScanInterval);
 }
   }
+
+  protected int getMaxIdleTime() {
+return connectionManager.maxIdleTime;
+  }
+
+  public String getServerName() {
+return serverName;
+  }
 }
diff --git 

[hadoop] 38/50: HDFS-14138. [SBN read] Description errors in the comparison logic of transaction ID. Contributed by xiangheng.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 2e7610a0295ca72fc59858eac75e73baba8bf729
Author: Chen Liang 
AuthorDate: Fri Dec 14 13:06:38 2018 -0800

HDFS-14138. [SBN read] Description errors in the comparison logic of 
transaction ID. Contributed by xiangheng.
---
 .../hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 3d49b68..c684314 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -2741,7 +2741,7 @@ public abstract class Server {
   call.getClientStateId() > alignmentContext.getLastSeenStateId()) 
{
 /*
  * The call processing should be postponed until the client call's
- * state id is aligned (>=) with the server state id.
+ * state id is aligned (<=) with the server state id.
 
  * NOTE:
  * Inserting the call back to the queue can change the order of 
call


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 43/50: HDFS-14154. [SBN read] Document dfs.ha.tail-edits.period in user guide. Contributed by Chao Sun.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 9c1c73a0d572d58cdde5aeaa552229309edc8d8c
Author: Chao Sun 
AuthorDate: Wed Dec 19 22:20:31 2018 -0800

HDFS-14154. [SBN read] Document dfs.ha.tail-edits.period in user guide. 
Contributed by Chao Sun.
---
 .../src/site/markdown/HDFSHighAvailabilityWithQJM.md   |  7 ++-
 .../hadoop-hdfs/src/site/markdown/ObserverNameNode.md  | 18 --
 2 files changed, 22 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
index 3b5157f..9557018 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
@@ -27,7 +27,12 @@ This document assumes that the reader has a general 
understanding of general com
 Note: Using the Quorum Journal Manager or Conventional Shared Storage
 -
 
-This guide discusses how to configure and use HDFS HA using the Quorum Journal 
Manager (QJM) to share edit logs between the Active and Standby NameNodes. For 
information on how to configure HDFS HA using NFS for shared storage instead of 
the QJM, please see [this alternative 
guide.](./HDFSHighAvailabilityWithNFS.html)
+This guide discusses how to configure and use HDFS HA using the Quorum
+Journal Manager (QJM) to share edit logs between the Active and Standby
+NameNodes. For information on how to configure HDFS HA using NFS for
+shared storage instead of the QJM, please see [this alternative
+guide.](./HDFSHighAvailabilityWithNFS.html). For information on how to
+configure HDFS HA with Observer NameNode, please see [this 
guide](./ObserverNameNode.html)
 
 Background
 --
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ObserverNameNode.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ObserverNameNode.md
index 2548315..d93256c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ObserverNameNode.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ObserverNameNode.md
@@ -23,13 +23,13 @@ Purpose
 This guide provides an overview of the HDFS Observer NameNode feature
 and how to configure/install it in a typical HA-enabled cluster. For a
 detailed technical design overview, please check the doc attached to
-HDFS-12943.
+[HDFS-12943](https://issues.apache.org/jira/browse/HDFS-12943).
 
 Background
 ---
 
 In a HA-enabled HDFS cluster (for more information, check
-[HDFSHighAvailabilityWithQJM](./HDFSHighAvailabilityWithQJM.md)), there
+[HDFSHighAvailabilityWithQJM](./HDFSHighAvailabilityWithQJM.html)), there
 is a single Active NameNode and one or more Standby NameNode(s). The
 Active NameNode is responsible for serving all client requests, while
 Standby NameNode just keep the up-to-date information regarding the
@@ -104,6 +104,20 @@ few configurations to your **hdfs-site.xml**:
   true
 
 
+*  **dfs.ha.tail-edits.period** - how often Standby/Observer NameNodes
+   should fetch edits from JournalNodes.
+
+   This determines the staleness of Observer NameNode w.r.t the Active.
+   If too large, RPC time will increase as client requests will wait
+   longer in the RPC queue before Observer tails edit logs and catches
+   up the latest state of Active. The default value is 1min. It is
+   **highly recommend** to configure this to a much lower value.
+
+
+  dfs.ha.tail-edits.period
+  0ms
+
+
 *  **dfs.journalnode.edit-cache-size.bytes** - the in-memory cache size,
in bytes, on the JournalNodes.
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 36/50: HDFS-14142. Move ipfailover config key out of HdfsClientConfigKeys. Contributed by Chen Liang.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 82f68a47c4f259aeb4585623485e309b4319c430
Author: Chen Liang 
AuthorDate: Wed Dec 12 10:39:39 2018 -0800

HDFS-14142. Move ipfailover config key out of HdfsClientConfigKeys. 
Contributed by Chen Liang.
---
 .../java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java | 3 ---
 .../namenode/ha/ObserverReadProxyProviderWithIPFailover.java | 9 +
 2 files changed, 5 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 00fb12d..52a7cd0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -181,9 +181,6 @@ public interface HdfsClientConfigKeys {
   String DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES =
   "dfs.namenode.snapshot.capture.openfiles";
   boolean DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES_DEFAULT = false;
-  
-  String DFS_CLIENT_FAILOVER_IPFAILOVER_VIRTUAL_ADDRESS =
-  Failover.PREFIX + "ipfailover.virtual-address";
 
   /**
* These are deprecated config keys to client code.
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProviderWithIPFailover.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProviderWithIPFailover.java
index 22f6dd3..fc12386 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProviderWithIPFailover.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProviderWithIPFailover.java
@@ -22,13 +22,12 @@ import java.net.URI;
 
 import java.util.Collections;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.HAUtilClient;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_FAILOVER_IPFAILOVER_VIRTUAL_ADDRESS;
-
 /**
  * Extends {@link ObserverReadProxyProvider} to support NameNode IP failover.
  *
@@ -81,6 +80,9 @@ public class ObserverReadProxyProviderWithIPFailover
   private static final Logger LOG = LoggerFactory.getLogger(
   ObserverReadProxyProviderWithIPFailover.class);
 
+  private static final String IPFAILOVER_CONFIG_PREFIX =
+  HdfsClientConfigKeys.Failover.PREFIX + "ipfailover.virtual-address";
+
   /**
* By default ObserverReadProxyProviderWithIPFailover
* uses {@link IPFailoverProxyProvider} for failover.
@@ -123,8 +125,7 @@ public class ObserverReadProxyProviderWithIPFailover
 
   private static URI getFailoverVirtualIP(
   Configuration conf, String nameServiceID) {
-String configKey = DFS_CLIENT_FAILOVER_IPFAILOVER_VIRTUAL_ADDRESS
-+ "." + nameServiceID;
+String configKey = IPFAILOVER_CONFIG_PREFIX + "." + nameServiceID;
 String virtualIP = conf.get(configKey);
 LOG.info("Name service ID {} will use virtual IP {} for failover",
 nameServiceID, virtualIP);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 41/50: HDFS-14149. [SBN read] Fix annotations on new interfaces/classes for SBN reads. Contributed by Chao Sun.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 9aecfdcb5277668d80f42355aa62bc600d2704c4
Author: Chao Sun 
AuthorDate: Mon Dec 17 16:53:43 2018 -0800

HDFS-14149. [SBN read] Fix annotations on new interfaces/classes for SBN 
reads. Contributed by Chao Sun.
---
 .../src/main/java/org/apache/hadoop/ipc/AlignmentContext.java   | 2 +-
 .../java/org/apache/hadoop/ipc/ObserverRetryOnActiveException.java  | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/ClientGSIContext.java  | 2 +-
 .../hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java   | 4 
 .../server/namenode/ha/ObserverReadProxyProviderWithIPFailover.java | 6 +-
 .../java/org/apache/hadoop/hdfs/server/namenode/ha/ReadOnly.java| 3 +++
 .../org/apache/hadoop/hdfs/qjournal/server/JournaledEditsCache.java | 5 +
 .../apache/hadoop/hdfs/server/namenode/GlobalStateIdContext.java| 2 +-
 8 files changed, 22 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
index bcddfbf..fbf825b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
@@ -34,7 +34,7 @@ import 
org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
  * to client.
  */
 @InterfaceAudience.Private
-@InterfaceStability.Stable
+@InterfaceStability.Evolving
 public interface AlignmentContext {
 
   /**
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ObserverRetryOnActiveException.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ObserverRetryOnActiveException.java
index 7e67b0c..336b304 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ObserverRetryOnActiveException.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ObserverRetryOnActiveException.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ipc;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
 import java.io.IOException;
@@ -26,6 +27,7 @@ import java.io.IOException;
  * client should retry active namenode directly (instead of retry other
  * ObserverNodes).
  */
+@InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class ObserverRetryOnActiveException extends IOException {
   static final long serialVersionUID = 1L;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientGSIContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientGSIContext.java
index a7bdd14..9b324bd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientGSIContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientGSIContext.java
@@ -34,7 +34,7 @@ import java.util.concurrent.atomic.LongAccumulator;
  * state alignment info from server(s).
  */
 @InterfaceAudience.Private
-@InterfaceStability.Stable
+@InterfaceStability.Evolving
 public class ClientGSIContext implements AlignmentContext {
 
   private final LongAccumulator lastSeenStateId =
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
index 5d56c59..e9d53f6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -26,6 +26,8 @@ import java.lang.reflect.Proxy;
 import java.net.URI;
 import java.util.List;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.ClientGSIContext;
@@ -58,6 +60,8 @@ import com.google.common.annotations.VisibleForTesting;
  * Read and write requests will still be sent to active NN if reading from
  * observer is turned off.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class ObserverReadProxyProvider
 extends AbstractNNFailoverProxyProvider {
   private static final Logger LOG = LoggerFactory.getLogger(
diff --git 

[hadoop] 01/50: HDFS-12975. [SBN read] Changes to the NameNode to support reads from standby. Contributed by Chao Sun.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 44d8b436974a9f36327b433275fafc99d7a1da31
Author: Chao Sun 
AuthorDate: Tue Mar 20 18:37:59 2018 -0700

HDFS-12975. [SBN read] Changes to the NameNode to support reads from 
standby. Contributed by Chao Sun.
---
 .../hdfs/server/common/HdfsServerConstants.java|  4 +++-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  | 14 ++-
 .../hadoop/hdfs/server/namenode/NameNode.java  | 28 --
 .../hdfs/server/namenode/NameNodeRpcServer.java|  2 +-
 .../hdfs/server/namenode/ha/StandbyState.java  | 17 +
 5 files changed, 55 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index c3098f3..7434347 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -156,7 +156,9 @@ public interface HdfsServerConstants {
 // only used for StorageDirectory.analyzeStorage() in hot swap drive 
scenario.
 // TODO refactor StorageDirectory.analyzeStorage() so that we can do away 
with
 // this in StartupOption.
-HOTSWAP("-hotswap");
+HOTSWAP("-hotswap"),
+// Startup the namenode in observer mode.
+OBSERVER("-observer");
 
 private static final Pattern ENUM_WITH_ROLLING_UPGRADE_OPTION = 
Pattern.compile(
 "(\\w+)\\((\\w+)\\)");
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 0677fdf..e33ad5c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -529,7 +529,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   private final ReentrantLock cpLock;
 
   /**
-   * Used when this NN is in standby state to read from the shared edit log.
+   * Used when this NN is in standby or observer state to read from the
+   * shared edit log.
*/
   private EditLogTailer editLogTailer = null;
 
@@ -1356,24 +1357,25 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   }
   
   /**
-   * Start services required in standby state 
+   * Start services required in standby or observer state
* 
* @throws IOException
*/
-  void startStandbyServices(final Configuration conf) throws IOException {
-LOG.info("Starting services required for standby state");
+  void startStandbyServices(final Configuration conf, boolean isObserver)
+  throws IOException {
+LOG.info("Starting services required for " +
+(isObserver ? "observer" : "standby") + " state");
 if (!getFSImage().editLog.isOpenForRead()) {
   // During startup, we're already open for read.
   getFSImage().editLog.initSharedJournalsForRead();
 }
-
 blockManager.setPostponeBlocksFromFuture(true);
 
 // Disable quota checks while in standby.
 dir.disableQuotaChecks();
 editLogTailer = new EditLogTailer(this, conf);
 editLogTailer.start();
-if (standbyShouldCheckpoint) {
+if (!isObserver && standbyShouldCheckpoint) {
   standbyCheckpointer = new StandbyCheckpointer(conf, this);
   standbyCheckpointer.start();
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 36120c6..731db4b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -355,6 +355,7 @@ public class NameNode extends ReconfigurableBase implements
   LoggerFactory.getLogger("BlockStateChange");
   public static final HAState ACTIVE_STATE = new ActiveState();
   public static final HAState STANDBY_STATE = new StandbyState();
+  public static final HAState OBSERVER_STATE = new StandbyState(true);
 
   private static final String NAMENODE_HTRACE_PREFIX = "namenode.htrace.";
 
@@ -944,9 +945,11 @@ public class NameNode extends ReconfigurableBase implements
   }
 
   protected HAState createHAState(StartupOption startOpt) {
-if (!haEnabled || startOpt == StartupOption.UPGRADE 
+if (!haEnabled || startOpt == StartupOption.UPGRADE
 || startOpt == 

[hadoop] 26/50: HDFS-13523. Support observer nodes in MiniDFSCluster. Contributed by Konstantin Shvachko.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 7b425c87030b3e720e1ebc69f3fdbc8d867654b6
Author: Konstantin V Shvachko 
AuthorDate: Thu Oct 11 22:39:17 2018 -0700

HDFS-13523. Support observer nodes in MiniDFSCluster. Contributed by 
Konstantin Shvachko.
---
 .../hdfs/TestStateAlignmentContextWithHA.java  | 61 ++
 .../hadoop/hdfs/server/namenode/ha/HATestUtil.java | 40 ++
 .../namenode/ha/TestConsistentReadsObserver.java   | 19 ++-
 .../server/namenode/ha/TestMultiObserverNode.java  | 16 ++
 .../hdfs/server/namenode/ha/TestObserverNode.java  | 16 ++
 5 files changed, 67 insertions(+), 85 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
index a642872..3dbeea7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
@@ -18,14 +18,15 @@
 
 package org.apache.hadoop.hdfs;
 
-import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAProxyFactory;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider;
@@ -66,6 +67,7 @@ public class TestStateAlignmentContextWithHA {
   private static final Configuration CONF = new HdfsConfiguration();
   private static final List AC_LIST = new ArrayList<>();
 
+  private static MiniQJMHACluster qjmhaCluster;
   private static MiniDFSCluster cluster;
   private static List clients;
 
@@ -87,33 +89,26 @@ public class TestStateAlignmentContextWithHA {
 
   @BeforeClass
   public static void startUpCluster() throws IOException {
-// disable block scanner
-CONF.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
 // Set short retry timeouts so this test runs faster
 CONF.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
-CONF.setBoolean("fs.hdfs.impl.disable.cache", true);
+CONF.setBoolean(String.format(
+"fs.%s.impl.disable.cache", HdfsConstants.HDFS_URI_SCHEME), true);
+CONF.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, NUMDATANODES);
 
-cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(NUMDATANODES)
-.nnTopology(MiniDFSNNTopology.simpleHATopology(3))
-.build();
-cluster.waitActive();
-cluster.transitionToActive(0);
-cluster.transitionToObserver(2);
-
-HATestUtil.setupHAConfiguration(
-cluster, CONF, 0, ORPPwithAlignmentContexts.class);
+qjmhaCluster = HATestUtil.setUpObserverCluster(CONF, 1, NUMDATANODES, 
true);
+cluster = qjmhaCluster.getDfsCluster();
   }
 
   @Before
   public void before() throws IOException, URISyntaxException {
-dfs = (DistributedFileSystem) FileSystem.get(CONF);
+dfs = HATestUtil.configureObserverReadFs(
+cluster, CONF, ORPPwithAlignmentContexts.class, true);
   }
 
   @AfterClass
   public static void shutDownCluster() throws IOException {
-if (cluster != null) {
-  cluster.shutdown();
-  cluster = null;
+if (qjmhaCluster != null) {
+  qjmhaCluster.shutdown();
 }
   }
 
@@ -144,9 +139,9 @@ public class TestStateAlignmentContextWithHA {
 long postWriteState =
 cluster.getNamesystem(active).getLastWrittenTransactionId();
 // Write(s) should have increased state. Check for greater than.
-assertThat(clientState > preWriteState, is(true));
+assertTrue(clientState > preWriteState);
 // Client and server state should be equal.
-assertThat(clientState, is(postWriteState));
+assertEquals(clientState, postWriteState);
   }
 
   /**
@@ -161,7 +156,7 @@ public class TestStateAlignmentContextWithHA {
 DFSTestUtil.readFile(dfs, new Path("/testFile2"));
 // Read should catch client up to last written state.
 long clientState = getContext(0).getLastSeenStateId();
-assertThat(clientState, is(lastWrittenId));
+assertEquals(clientState, lastWrittenId);
   }
 
   /**
@@ -173,12 +168,12 @@ public class TestStateAlignmentContextWithHA {
 DFSTestUtil.writeFile(dfs, new Path("/testFile3"), "ezpz");
 long lastWrittenId =
 

[hadoop] 31/50: HDFS-14017. [SBN read] ObserverReadProxyProviderWithIPFailover should work with HA configuration. Contributed by Chen Liang.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 96cdd13de58c4b4bbb57751642547f53405fda9e
Author: Chen Liang 
AuthorDate: Fri Nov 16 17:30:29 2018 -0800

HDFS-14017. [SBN read] ObserverReadProxyProviderWithIPFailover should work 
with HA configuration. Contributed by Chen Liang.
---
 .../hadoop/hdfs/client/HdfsClientConfigKeys.java   |  3 +
 .../ObserverReadProxyProviderWithIPFailover.java   | 97 +++---
 2 files changed, 89 insertions(+), 11 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 52a7cd0..00fb12d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -181,6 +181,9 @@ public interface HdfsClientConfigKeys {
   String DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES =
   "dfs.namenode.snapshot.capture.openfiles";
   boolean DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES_DEFAULT = false;
+  
+  String DFS_CLIENT_FAILOVER_IPFAILOVER_VIRTUAL_ADDRESS =
+  Failover.PREFIX + "ipfailover.virtual-address";
 
   /**
* These are deprecated config keys to client code.
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProviderWithIPFailover.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProviderWithIPFailover.java
index 1dbd02c..751bc3b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProviderWithIPFailover.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProviderWithIPFailover.java
@@ -17,24 +17,99 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import java.io.IOException;
 import java.net.URI;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_FAILOVER_IPFAILOVER_VIRTUAL_ADDRESS;
 
 /**
- * ObserverReadProxyProvider with IPFailoverProxyProvider
- * as the failover method.
+ * Extends {@link ObserverReadProxyProvider} to support NameNode IP failover.
+ *
+ * For Observer reads a client needs to know physical addresses of all
+ * NameNodes, so that it could switch between active and observer nodes
+ * for write and read requests.
+ *
+ * Traditional {@link IPFailoverProxyProvider} works with a virtual
+ * address of the NameNode. If active NameNode fails the virtual address
+ * is assigned to the standby NameNode, and IPFailoverProxyProvider, which
+ * keeps talking to the same virtual address is in fact now connects to
+ * the new physical server.
+ *
+ * To combine these behaviors ObserverReadProxyProviderWithIPFailover
+ * should both
+ * 
+ *  Maintain all physical addresses of NameNodes in order to allow
+ * observer reads, and
+ *  Should rely on the virtual address of the NameNode in order to
+ * perform failover by assuming that the virtual address always points
+ * to the active NameNode.
+ * 
+ *
+ * An example of a configuration to leverage
+ * ObserverReadProxyProviderWithIPFailover
+ * should include the following values:
+ * {@code
+ * fs.defaultFS = hdfs://mycluster
+ * dfs.nameservices = mycluster
+ * dfs.ha.namenodes.mycluster = ha1,ha2
+ * dfs.namenode.rpc-address.mycluster.ha1 = nn01-ha1.com:8020
+ * dfs.namenode.rpc-address.mycluster.ha2 = nn01-ha2.com:8020
+ * dfs.client.failover.ipfailover.virtual-address.mycluster = nn01.com:8020
+ * dfs.client.failover.proxy.provider.mycluster =
+ * org.apache...ObserverReadProxyProviderWithIPFailover
+ * }
+ * Here {@code nn01.com:8020} is the virtual address of the active NameNode,
+ * while {@code nn01-ha1.com:8020} and {@code nn01-ha2.com:8020}
+ * are the physically addresses the two NameNodes.
+ *
+ * With this configuration, client will use
+ * ObserverReadProxyProviderWithIPFailover, which creates proxies for both
+ * nn01-ha1 and nn01-ha2, used for read/write RPC calls, but for the failover,
+ * it relies on the virtual address nn01.com
  */
-public class
-ObserverReadProxyProviderWithIPFailover
-extends ObserverReadProxyProvider {
 
+public class ObserverReadProxyProviderWithIPFailover
+extends ObserverReadProxyProvider {
+  private static final Logger LOG = LoggerFactory.getLogger(
+  ObserverReadProxyProviderWithIPFailover.class);
+
+  /**
+   * By default ObserverReadProxyProviderWithIPFailover
+ 
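
For illustration, a minimal Java sketch of the configuration described in the
javadoc above; the nameservice id and host names are the javadoc's
placeholders, not real endpoints:

import org.apache.hadoop.conf.Configuration;

public class OrppIpFailoverConfigSketch {
  // Programmatic equivalent of the hdfs-site.xml example in the javadoc.
  public static Configuration build() {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://mycluster");
    conf.set("dfs.nameservices", "mycluster");
    conf.set("dfs.ha.namenodes.mycluster", "ha1,ha2");
    conf.set("dfs.namenode.rpc-address.mycluster.ha1", "nn01-ha1.com:8020");
    conf.set("dfs.namenode.rpc-address.mycluster.ha2", "nn01-ha2.com:8020");
    // The virtual address is assumed to always point at the active NameNode.
    conf.set("dfs.client.failover.ipfailover.virtual-address.mycluster",
        "nn01.com:8020");
    conf.set("dfs.client.failover.proxy.provider.mycluster",
        "org.apache.hadoop.hdfs.server.namenode.ha."
            + "ObserverReadProxyProviderWithIPFailover");
    return conf;
  }
}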

[hadoop] 46/50: HDFS-14279. [SBN read] Fix race condition in ObserverReadProxyProvider. Contributed by Erik Krogen.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 2598282293f0b857c94157a3f0b38a8f30525317
Author: Erik Krogen 
AuthorDate: Thu Feb 14 08:59:56 2019 -0800

HDFS-14279. [SBN read] Fix race condition in ObserverReadProxyProvider. 
Contributed by Erik Krogen.
---
 .../namenode/ha/ObserverReadProxyProvider.java | 23 +-
 1 file changed, 9 insertions(+), 14 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
index 7540508..356600f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -92,13 +92,12 @@ public class ObserverReadProxyProvider
* be accessed in synchronized methods.
*/
   private int currentIndex = -1;
+
   /**
-   * The proxy being used currently; this will match with currentIndex above.
-   * This field is volatile to allow reads without synchronization; updates
-   * should still be performed synchronously to maintain consistency between
-   * currentIndex and this field.
+   * The proxy being used currently. Should only be accessed in synchronized
+   * methods.
*/
-  private volatile NNProxyInfo currentProxy;
+  private NNProxyInfo currentProxy;
 
   /** The last proxy that has been used. Only used for testing. */
   private volatile ProxyInfo lastProxy = null;
@@ -191,10 +190,7 @@ public class ObserverReadProxyProvider
* {@link #changeProxy(NNProxyInfo)} to initialize one.
*/
   private NNProxyInfo getCurrentProxy() {
-if (currentProxy == null) {
-  changeProxy(null);
-}
-return currentProxy;
+return changeProxy(null);
   }
 
   /**
@@ -205,15 +201,13 @@ public class ObserverReadProxyProvider
* returning.
*
* @param initial The expected current proxy
+   * @return The new proxy that should be used.
*/
-  private synchronized void changeProxy(NNProxyInfo initial) {
+  private synchronized NNProxyInfo changeProxy(NNProxyInfo initial) {
 if (currentProxy != initial) {
   // Must have been a concurrent modification; ignore the move request
-  return;
+  return currentProxy;
 }
-// Attempt to force concurrent callers of getCurrentProxy to wait for the
-// new proxy; best-effort by setting currentProxy to null
-currentProxy = null;
 currentIndex = (currentIndex + 1) % nameNodeProxies.size();
 currentProxy = createProxyIfNeeded(nameNodeProxies.get(currentIndex));
 try {
@@ -227,6 +221,7 @@ public class ObserverReadProxyProvider
 LOG.debug("Changed current proxy from {} to {}",
 initial == null ? "none" : initial.proxyInfo,
 currentProxy.proxyInfo);
+return currentProxy;
   }
 
   /**


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
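
The essence of the fix above: readers no longer race a half-published volatile
field; they funnel through the same synchronized method that rotates the
proxy. A standalone restatement of the pattern, with placeholder types rather
than the real HDFS classes:

import java.util.List;

final class ProxyRotation<T> {
  private final List<T> proxies;
  private int currentIndex = -1; // guarded by "this"
  private T currentProxy;        // guarded by "this"; no longer volatile

  ProxyRotation(List<T> proxies) {
    this.proxies = proxies;
  }

  // Readers funnel through the synchronized mutator, so they either see the
  // installed proxy or wait for a concurrent rotation to finish.
  T getCurrentProxy() {
    return changeProxy(null);
  }

  synchronized T changeProxy(T expected) {
    if (currentProxy != expected) {
      // A concurrent caller already rotated; keep what it installed.
      return currentProxy;
    }
    currentIndex = (currentIndex + 1) % proxies.size();
    currentProxy = proxies.get(currentIndex);
    return currentProxy;
  }
}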



[hadoop] 28/50: HDFS-13924. [SBN read] Handle BlockMissingException when reading from observer. Contributed by Chao Sun.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 11897618b98eb7c4421511dceed4921b25c9cace
Author: Chao Sun 
AuthorDate: Tue Oct 23 22:36:23 2018 -0700

HDFS-13924. [SBN read] Handle BlockMissingException when reading from 
observer. Contributed by Chao Sun.
---
 .../hadoop/ipc/ObserverRetryOnActiveException.java | 35 ++
 .../namenode/ha/ObserverReadProxyProvider.java | 12 
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  9 ++
 .../hdfs/server/namenode/ha/TestObserverNode.java  | 35 --
 .../namenode/ha/TestObserverReadProxyProvider.java | 32 
 5 files changed, 121 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ObserverRetryOnActiveException.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ObserverRetryOnActiveException.java
new file mode 100644
index 000..7e67b0c
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ObserverRetryOnActiveException.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.IOException;
+
+/**
+ * Thrown by a remote ObserverNode indicating the operation has failed and the
+ * client should retry the active namenode directly (instead of retrying other
+ * ObserverNodes).
+ */
+@InterfaceStability.Evolving
+public class ObserverRetryOnActiveException extends IOException {
+  static final long serialVersionUID = 1L;
+  public ObserverRetryOnActiveException(String msg) {
+super(msg);
+  }
+}
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
index 690ee0b..87ca718 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -37,7 +37,9 @@ import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
 import org.apache.hadoop.ipc.AlignmentContext;
+import org.apache.hadoop.ipc.ObserverRetryOnActiveException;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -263,6 +265,16 @@ public class ObserverReadProxyProvider
   throw ite.getCause();
 }
 Exception e = (Exception) ite.getCause();
+if (e instanceof RemoteException) {
+  RemoteException re = (RemoteException) e;
+  Exception unwrapped = re.unwrapRemoteException(
+  ObserverRetryOnActiveException.class);
+  if (unwrapped instanceof ObserverRetryOnActiveException) {
+LOG.info("Encountered ObserverRetryOnActiveException from {}."
++ " Retry active namenode directly.", current.proxyInfo);
+break;
+  }
+}
 RetryAction retryInfo = observerRetryPolicy.shouldRetry(e, 0, 0,
 method.isAnnotationPresent(Idempotent.class)
 || method.isAnnotationPresent(AtMostOnce.class));
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 4dfc24a..2d3aaaf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -280,6 +280,7 @@ import 
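
A simplified restatement of the client-side handling added above, pulled out
of the invocation handler for clarity; the helper name below is a placeholder
for the provider's internals:

import java.io.IOException;
import org.apache.hadoop.ipc.ObserverRetryOnActiveException;
import org.apache.hadoop.ipc.RemoteException;

final class ObserverFallback {
  // Returns true when the remote observer asked us to go to the active NN.
  static boolean shouldFallBackToActive(Exception e) {
    if (e instanceof RemoteException) {
      IOException unwrapped = ((RemoteException) e).unwrapRemoteException(
          ObserverRetryOnActiveException.class);
      return unwrapped instanceof ObserverRetryOnActiveException;
    }
    return false;
  }
}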

[hadoop] 45/50: HDFS-14250. [SBN read]. msync should always direct to active NameNode to get latest stateID. Contributed by Chao Sun.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 45cd309a406004007bd96dc8deb17fa08f0ab8d8
Author: Erik Krogen 
AuthorDate: Wed Feb 6 09:54:47 2019 -0800

HDFS-14250. [SBN read]. msync should always direct to active NameNode to 
get latest stateID. Contributed by Chao Sun.
---
 .../hadoop/hdfs/protocol/ClientProtocol.java   |  2 +-
 .../namenode/ha/TestConsistentReadsObserver.java   | 47 +-
 2 files changed, 47 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 9c4f39e..a0040f9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1754,6 +1754,6 @@ public interface ClientProtocol {
* @throws IOException
*/
   @Idempotent
-  @ReadOnly(isCoordinated = true)
+  @ReadOnly(activeOnly = true)
   void msync() throws IOException;
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
index fe5345d..2845670 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestConsistentReadsObserver.java
@@ -26,6 +26,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -150,7 +151,51 @@ public class TestConsistentReadsObserver {
 assertEquals(1, readStatus.get());
   }
 
-  // @Ignore("Move to another test file")
+  @Test
+  public void testMsync() throws Exception {
+// 0 == not completed, 1 == succeeded, -1 == failed
+AtomicInteger readStatus = new AtomicInteger(0);
+Configuration conf2 = new Configuration(conf);
+
+// Disable FS cache so two different DFS clients will be used.
+conf2.setBoolean("fs.hdfs.impl.disable.cache", true);
+DistributedFileSystem dfs2 = (DistributedFileSystem) FileSystem.get(conf2);
+
+// Initialize the proxies for Observer Node.
+dfs.getClient().getHAServiceState();
+dfs2.getClient().getHAServiceState();
+
+// Advance Observer's state ID so it is ahead of client's.
+dfs.mkdir(new Path("/test"), FsPermission.getDefault());
+dfsCluster.rollEditLogAndTail(0);
+
+dfs.mkdir(testPath, FsPermission.getDefault());
+assertSentTo(0);
+
+Thread reader = new Thread(() -> {
+  try {
+// After msync, client should have the latest state ID from active.
+// Therefore, the subsequent getFileStatus call should succeed.
+dfs2.getClient().msync();
+dfs2.getFileStatus(testPath);
+readStatus.set(1);
+  } catch (IOException e) {
+e.printStackTrace();
+readStatus.set(-1);
+  }
+});
+
+reader.start();
+
+Thread.sleep(100);
+assertEquals(0, readStatus.get());
+
+dfsCluster.rollEditLogAndTail(0);
+
+GenericTestUtils.waitFor(() -> readStatus.get() != 0, 100, 1);
+assertEquals(1, readStatus.get());
+  }
+
   @Test
   public void testUncoordinatedCall() throws Exception {
 // make a write call so that client will be ahead of


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
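
A minimal client-side sketch of what testMsync() above exercises: force the
client's state ID up to the active NameNode before reading, so an observer
read cannot return stale metadata. Paths and configuration mirror the test
and are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class MsyncExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean("fs.hdfs.impl.disable.cache", true); // fresh client
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    // msync is routed to the active NameNode (activeOnly = true above), so
    // after it returns, this client's state ID is at least the active's.
    dfs.getClient().msync();
    // A subsequent observer read cannot see metadata older than that point.
    System.out.println(dfs.getFileStatus(new Path("/test")));
  }
}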



[hadoop] 21/50: HDFS-13778. [SBN read] TestStateAlignmentContextWithHA should use real ObserverReadProxyProvider instead of AlignmentContextProxyProvider. Contributed by Konstantin Shvachko and Plamen

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit ea2d86232041b2f36ef359218e9c0c38a9f07a7e
Author: Konstantin V Shvachko 
AuthorDate: Mon Sep 17 18:25:27 2018 -0700

HDFS-13778. [SBN read] TestStateAlignmentContextWithHA should use real 
ObserverReadProxyProvider instead of AlignmentContextProxyProvider. Contributed 
by Konstantin Shvachko and Plamen Jeliazkov.
---
 .../hdfs/TestStateAlignmentContextWithHA.java  | 186 +++--
 1 file changed, 57 insertions(+), 129 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
index 1acbd75..a494252 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
@@ -18,28 +18,24 @@
 
 package org.apache.hadoop.hdfs;
 
-import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.CoreMatchers.not;
 import static org.junit.Assert.assertThat;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.ha.ClientHAProxyFactory;
-import 
org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAProxyFactory;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
-import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos;
-import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.net.URI;
@@ -61,55 +57,31 @@ import java.util.concurrent.TimeUnit;
  * to the most recent alignment state of the server.
  */
 public class TestStateAlignmentContextWithHA {
+  public static final Logger LOG =
+  LoggerFactory.getLogger(TestStateAlignmentContextWithHA.class.getName());
 
   private static final int NUMDATANODES = 1;
   private static final int NUMCLIENTS = 10;
-  private static final int NUMFILES = 300;
+  private static final int NUMFILES = 120;
   private static final Configuration CONF = new HdfsConfiguration();
-  private static final String NAMESERVICE = "nameservice";
   private static final List AC_LIST = new ArrayList<>();
 
   private static MiniDFSCluster cluster;
   private static List clients;
-  private static ClientGSIContext spy;
 
   private DistributedFileSystem dfs;
   private int active = 0;
   private int standby = 1;
 
-  static class AlignmentContextProxyProvider
-  extends ConfiguredFailoverProxyProvider {
+  static class ORPPwithAlignmentContexts
+  extends ObserverReadProxyProvider {
 
-private ClientGSIContext alignmentContext;
-
-public AlignmentContextProxyProvider(
+public ORPPwithAlignmentContexts(
 Configuration conf, URI uri, Class xface,
 HAProxyFactory factory) throws IOException {
   super(conf, uri, xface, factory);
 
-  // Create and set AlignmentContext in HAProxyFactory.
-  // All proxies by factory will now have AlignmentContext assigned.
-  this.alignmentContext = (spy != null ? spy : new ClientGSIContext());
-  ((ClientHAProxyFactory) 
factory).setAlignmentContext(alignmentContext);
-
-  AC_LIST.add(alignmentContext);
-}
-  }
-
-  static class SpyConfiguredContextProxyProvider
-  extends ConfiguredFailoverProxyProvider {
-
-private ClientGSIContext alignmentContext;
-
-public SpyConfiguredContextProxyProvider(
-Configuration conf, URI uri, Class xface,
-HAProxyFactory factory) throws IOException {
-  super(conf, uri, xface, factory);
-
-  // Create but DON'T set in HAProxyFactory.
-  this.alignmentContext = (spy != null ? spy : new ClientGSIContext());
-
-  AC_LIST.add(alignmentContext);
+  AC_LIST.add((ClientGSIContext) getAlignmentContext());
 }
   }
 
@@ -121,23 +93,21 @@ public class TestStateAlignmentContextWithHA {
 CONF.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
 CONF.setBoolean("fs.hdfs.impl.disable.cache", true);
 
-MiniDFSNNTopology.NSConf nsConf = new 
MiniDFSNNTopology.NSConf(NAMESERVICE);
-nsConf.addNN(new 

[hadoop] 34/50: HDFS-14120. [SBN read] ORFPP should also clone DT for the virtual IP. Contributed by Chen Liang.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 8769e6feda34713e130a88cabd145a1c74e71591
Author: Chen Liang 
AuthorDate: Mon Dec 3 17:05:07 2018 -0800

HDFS-14120. [SBN read] ORFPP should also clone DT for the virtual IP. 
Contributed by Chen Liang.
---
 .../ObserverReadProxyProviderWithIPFailover.java   | 24 +-
 1 file changed, 23 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProviderWithIPFailover.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProviderWithIPFailover.java
index 751bc3b..22f6dd3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProviderWithIPFailover.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProviderWithIPFailover.java
@@ -17,9 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
+import java.net.InetSocketAddress;
 import java.net.URI;
 
+import java.util.Collections;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HAUtilClient;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -58,7 +61,8 @@ import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_FAIL
  * dfs.ha.namenodes.mycluster = ha1,ha2
  * dfs.namenode.rpc-address.mycluster.ha1 = nn01-ha1.com:8020
  * dfs.namenode.rpc-address.mycluster.ha2 = nn01-ha2.com:8020
- * dfs.client.failover.ipfailover.virtual-address.mycluster = nn01.com:8020
+ * dfs.client.failover.ipfailover.virtual-address.mycluster =
+ * hdfs://nn01.com:8020
  * dfs.client.failover.proxy.provider.mycluster =
  * org.apache...ObserverReadProxyProviderWithIPFailover
  * }
@@ -97,6 +101,24 @@ public class ObserverReadProxyProviderWithIPFailover
   Configuration conf, URI uri, Class xface, HAProxyFactory factory,
   AbstractNNFailoverProxyProvider failoverProxy) {
 super(conf, uri, xface, factory, failoverProxy);
+cloneDelegationTokenForVirtualIP(conf, uri);
+  }
+
+  /**
+   * Clone the delegation token for the virtual IP. Specifically,
+   * clone the DT that corresponds to the name service URI
+   * to the configured corresponding virtual IP.
+   *
+   * @param conf configuration
+   * @param haURI the ha uri, a name service id in this case.
+   */
+  private void cloneDelegationTokenForVirtualIP(
+  Configuration conf, URI haURI) {
+URI virtualIPURI = getFailoverVirtualIP(conf, haURI.getHost());
+InetSocketAddress vipAddress = new InetSocketAddress(
+virtualIPURI.getHost(), virtualIPURI.getPort());
+HAUtilClient.cloneDelegationTokenForLogicalUri(
+ugi, haURI, Collections.singleton(vipAddress));
   }
 
   private static URI getFailoverVirtualIP(


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 50/50: HDFS-14537. Journaled Edits Cache is not cleared when formatting the JN. Contributed by Ranith Sardar.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit b5303012a0c0e839b4bdc1a9b5640425a44a831c
Author: Wei-Chiu Chuang 
AuthorDate: Tue Jun 18 22:28:21 2019 -0700

HDFS-14537. Journaled Edits Cache is not cleared when formatting the JN. 
Contributed by Ranith Sardar.
---
 .../hadoop/hdfs/qjournal/server/Journal.java   | 24 ++
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 5fb4fc3..02e2a7e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -139,12 +139,14 @@ public class Journal implements Closeable {
   
   private final FileJournalManager fjm;
 
-  private final JournaledEditsCache cache;
+  private JournaledEditsCache cache;
 
   private final JournalMetrics metrics;
 
   private long lastJournalTimestamp = 0;
 
+  private Configuration conf = null;
+
   // This variable tracks, have we tried to start journalsyncer
   // with nameServiceId. This will help not to start the journalsyncer
   // on each rpc call, if it has failed to start
@@ -158,6 +160,7 @@ public class Journal implements Closeable {
   Journal(Configuration conf, File logDir, String journalId,
   StartupOption startOpt, StorageErrorReporter errorReporter)
   throws IOException {
+this.conf = conf;
 storage = new JNStorage(conf, logDir, startOpt, errorReporter);
 this.journalId = journalId;
 
@@ -165,13 +168,8 @@ public class Journal implements Closeable {
 
 this.fjm = storage.getJournalManager();
 
-if (conf.getBoolean(DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY,
-DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_DEFAULT)) {
-  this.cache = new JournaledEditsCache(conf);
-} else {
-  this.cache = null;
-}
-
+this.cache = createCache();
+
 this.metrics = JournalMetrics.create(this);
 
 EditLogFile latest = scanStorageForLatestEdits();
@@ -180,6 +178,15 @@ public class Journal implements Closeable {
 }
   }
 
+  private JournaledEditsCache createCache() {
+if (conf.getBoolean(DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY,
+DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_DEFAULT)) {
+  return new JournaledEditsCache(conf);
+} else {
+  return null;
+}
+  }
+
   public void setTriedJournalSyncerStartedwithnsId(boolean started) {
 this.triedJournalSyncerStartedwithnsId = started;
   }
@@ -249,6 +256,7 @@ public class Journal implements Closeable {
LOG.info("Formatting journal id : " + journalId + " with namespace info: " +
nsInfo);
 storage.format(nsInfo);
+this.cache = createCache();
 refreshCachedData();
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
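
The shape of the fix above, restated standalone: the cache field loses its
final modifier and is rebuilt whenever the journal is formatted, so stale
cached edits are dropped along with the storage. All names below are
placeholders for Journal/JournaledEditsCache:

import java.util.ArrayList;
import java.util.List;

final class JournalCacheSketch {
  private final boolean inProgressTailingEnabled;
  private List<byte[]> cache; // no longer final: rebuilt on format()

  JournalCacheSketch(boolean inProgressTailingEnabled) {
    this.inProgressTailingEnabled = inProgressTailingEnabled;
    this.cache = createCache();
  }

  private List<byte[]> createCache() {
    // Only keep an edits cache when in-progress edit tailing is enabled.
    return inProgressTailingEnabled ? new ArrayList<>() : null;
  }

  void format() {
    // Formatting wipes storage, so stale cached edits must go too.
    this.cache = createCache();
  }
}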



[hadoop] 49/50: HDFS-14435. [SBN Read] Enable ObserverReadProxyProvider to gracefully handle StandbyException when fetching HAServiceState. Contributed by Erik Krogen.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 32d80d2201c42e4b38d0875f5db8c4ae90702c25
Author: Erik Krogen 
AuthorDate: Wed Apr 17 12:41:48 2019 -0700

HDFS-14435. [SBN Read] Enable ObserverReadProxyProvider to gracefully 
handle StandbyException when fetching HAServiceState. Contributed by Erik 
Krogen.
---
 .../namenode/ha/ObserverReadProxyProvider.java | 38 +-
 .../hadoop/hdfs/server/namenode/ha/HATestUtil.java | 20 +++---
 .../namenode/ha/TestDelegationTokensWithHA.java| 46 ++
 3 files changed, 90 insertions(+), 14 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
index 31c2ddf..b79dec6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.ipc.ObserverRetryOnActiveException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RpcInvocationHandler;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -67,7 +68,8 @@ import com.google.common.annotations.VisibleForTesting;
 @InterfaceStability.Evolving
 public class ObserverReadProxyProvider
 extends AbstractNNFailoverProxyProvider {
-  private static final Logger LOG = LoggerFactory.getLogger(
+  @VisibleForTesting
+  static final Logger LOG = LoggerFactory.getLogger(
   ObserverReadProxyProvider.class);
 
   /** Configuration key for {@link #autoMsyncPeriodMs}. */
@@ -251,14 +253,7 @@ public class ObserverReadProxyProvider
 }
 currentIndex = (currentIndex + 1) % nameNodeProxies.size();
 currentProxy = createProxyIfNeeded(nameNodeProxies.get(currentIndex));
-try {
-  HAServiceState state = currentProxy.proxy.getHAServiceState();
-  currentProxy.setCachedState(state);
-} catch (IOException e) {
-  LOG.info("Failed to connect to {}. Setting cached state to Standby",
-  currentProxy.getAddress(), e);
-  currentProxy.setCachedState(HAServiceState.STANDBY);
-}
+currentProxy.setCachedState(getHAServiceState(currentProxy));
 LOG.debug("Changed current proxy from {} to {}",
 initial == null ? "none" : initial.proxyInfo,
 currentProxy.proxyInfo);
@@ -266,6 +261,31 @@ public class ObserverReadProxyProvider
   }
 
   /**
+   * Fetch the service state from a proxy. If it is unable to be fetched,
+   * assume it is in standby state, but log the exception.
+   */
+  private HAServiceState getHAServiceState(NNProxyInfo proxyInfo) {
+IOException ioe;
+try {
+  return proxyInfo.proxy.getHAServiceState();
+} catch (RemoteException re) {
+  // Though a Standby will allow a getHAServiceState call, it won't allow
+  // delegation token lookup, so if DT is used it throws StandbyException
+  if (re.unwrapRemoteException() instanceof StandbyException) {
+LOG.debug("NameNode {} threw StandbyException when fetching HAState",
+proxyInfo.getAddress());
+return HAServiceState.STANDBY;
+  }
+  ioe = re;
+} catch (IOException e) {
+  ioe = e;
+}
+LOG.info("Failed to connect to {}. Assuming Standby state",
+proxyInfo.getAddress(), ioe);
+return HAServiceState.STANDBY;
+  }
+
+  /**
* This will call {@link ClientProtocol#msync()} on the active NameNode
* (via the {@link #failoverProxy}) to initialize the state of this client.
* Calling it multiple times is a no-op; only the first will perform an
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
index 16aa924..40b2fe8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
@@ -265,24 +265,34 @@ public abstract class HATestUtil {
   /** Sets the required configurations for performing failover.  */
   public static void setFailoverConfigurations(MiniDFSCluster cluster,
   Configuration conf, String logicalName, int nsIndex) {
+setFailoverConfigurations(cluster, conf, logicalName, nsIndex,
+ConfiguredFailoverProxyProvider.class);
+  }
+
+  /** Sets the required configurations for 

[hadoop] 22/50: HDFS-13749. [SBN read] Use getServiceStatus to discover observer namenodes. Contributed by Chao Sun.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 32737360a5b27db2fc34a7e89263ace3267e3295
Author: Erik Krogen 
AuthorDate: Thu Sep 20 13:27:58 2018 -0700

HDFS-13749. [SBN read] Use getServiceStatus to discover observer namenodes. 
Contributed by Chao Sun.
---
 .../apache/hadoop/hdfs/NameNodeProxiesClient.java  |  42 -
 .../ha/AbstractNNFailoverProxyProvider.java|  36 +--
 .../namenode/ha/IPFailoverProxyProvider.java   |   2 +-
 .../namenode/ha/ObserverReadProxyProvider.java |  49 +-
 .../namenode/ha/TestObserverReadProxyProvider.java | 105 +++--
 5 files changed, 149 insertions(+), 85 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
index 284e4ef..b71e84d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
@@ -25,12 +25,16 @@ import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.ha.protocolPB.HAServiceProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.ha.ClientHAProxyFactory;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAProxyFactory;
 import org.apache.hadoop.ipc.AlignmentContext;
+import org.apache.hadoop.ipc.Client;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -62,8 +66,9 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /**
- * Create proxy objects with {@link ClientProtocol} to communicate with a 
remote
- * NN. Generally use {@link 
NameNodeProxiesClient#createProxyWithClientProtocol(
+ * Create proxy objects with {@link ClientProtocol} and
+ * {@link HAServiceProtocol} to communicate with a remote NN. For the former,
+ * generally use {@link NameNodeProxiesClient#createProxyWithClientProtocol(
  * Configuration, URI, AtomicBoolean)}, which will create either an HA- or
  * non-HA-enabled client proxy as appropriate.
  *
@@ -76,6 +81,11 @@ public class NameNodeProxiesClient {
   private static final Logger LOG = LoggerFactory.getLogger(
   NameNodeProxiesClient.class);
 
+  /** Maximum # of retries for HAProxy with HAServiceProtocol. */
+  private static final int MAX_RETRIES = 3;
+  /** Initial retry delay for HAProxy with HAServiceProtocol. */
+  private static final int DELAY_MILLISECONDS = 200;
+
   /**
* Wrapper for a client proxy as well as its associated service ID.
* This is simply used as a tuple-like return type for created NN proxy.
@@ -343,6 +353,34 @@ public class NameNodeProxiesClient {
 fallbackToSimpleAuth, null);
   }
 
+  /**
+   * Creates a non-HA proxy object with {@link HAServiceProtocol} to the
+   * given NameNode address, using the provided configuration. The proxy will
+   * use the RPC timeout configuration specified via {@link
+   * org.apache.hadoop.fs.CommonConfigurationKeys#IPC_CLIENT_RPC_TIMEOUT_KEY}.
+   * Upon failures, this will retry up to certain times with {@link 
RetryProxy}.
+   *
+   * @param address the NameNode address
+   * @param conf the configuration to be used
+   * @return a non-HA proxy with {@link HAServiceProtocol}.
+   */
+  public static HAServiceProtocol createNonHAProxyWithHAServiceProtocol(
+  InetSocketAddress address, Configuration conf) throws IOException {
+RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(
+MAX_RETRIES, DELAY_MILLISECONDS, TimeUnit.MILLISECONDS);
+
+HAServiceProtocol proxy =
+new HAServiceProtocolClientSideTranslatorPB(
+address, conf, NetUtils.getDefaultSocketFactory(conf),
+Client.getRpcTimeout(conf));
+return (HAServiceProtocol) RetryProxy.create(
+HAServiceProtocol.class,
+new DefaultFailoverProxyProvider<>(HAServiceProtocol.class, proxy),
+new HashMap<>(),
+timeoutPolicy
+);
+  }
+
   public static ClientProtocol createProxyWithAlignmentContext(
   InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
   boolean withRetries, AtomicBoolean fallbackToSimpleAuth,
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
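
A hypothetical probe built on the helper added above: ask a NameNode for its
HA status and print its state. The address is a placeholder, and note that a
later commit in this digest (HDFS-14035) removes this helper in favor of
ClientProtocol.getHAServiceState:

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceStatus;
import org.apache.hadoop.hdfs.NameNodeProxiesClient;

public class HAStateProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    InetSocketAddress addr = new InetSocketAddress("nn01-ha2.com", 8020);
    HAServiceProtocol proxy =
        NameNodeProxiesClient.createNonHAProxyWithHAServiceProtocol(addr, conf);
    HAServiceStatus status = proxy.getServiceStatus();
    System.out.println(addr + " is in state " + status.getState());
  }
}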

[hadoop] 14/50: HDFS-13688. [SBN read] Introduce msync API call. Contributed by Chen Liang.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 0f811b0849a7621a568bdaf2f0bb5eefeb62e0a9
Author: Erik Krogen 
AuthorDate: Wed Aug 1 09:58:04 2018 -0700

HDFS-13688. [SBN read] Introduce msync API call. Contributed by Chen Liang.
---
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java| 14 ++
 .../org/apache/hadoop/hdfs/protocol/ClientProtocol.java| 11 +++
 .../protocolPB/ClientNamenodeProtocolTranslatorPB.java | 11 +++
 .../src/main/proto/ClientNamenodeProtocol.proto|  8 
 .../java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java |  3 ++-
 .../hdfs/server/federation/router/RouterRpcServer.java |  5 +
 .../ClientNamenodeProtocolServerSideTranslatorPB.java  | 13 +
 .../hadoop/hdfs/server/namenode/NameNodeRpcServer.java |  5 +
 8 files changed, 69 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index eb77db6..4f708a5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3042,4 +3042,18 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 checkOpen();
 return new OpenFilesIterator(namenode, tracer, openFilesTypes, path);
   }
+
+  /**
+   * A blocking call to wait for the Observer NameNode state ID to catch up
+   * to the current client state ID. The current client state ID is given by
+   * the client alignment context.
+   * An assumption is that the client alignment context has the state ID set
+   * at this point. This is because ObserverReadProxyProvider sets up the
+   * initial state ID when it is being created.
+   *
+   * @throws IOException
+   */
+  public void msync() throws IOException {
+namenode.msync();
+  }
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 7923113..6196500 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1734,4 +1734,15 @@ public interface ClientProtocol {
   @ReadOnly
   BatchedEntries listOpenFiles(long prevId,
   EnumSet openFilesTypes, String path) throws IOException;
+
+  /**
+   * Called by the client to wait until the server has reached the state ID of
+   * the client. The client and server state IDs are given by the client-side
+   * and server-side alignment contexts respectively. This can be a blocking call.
+   *
+   * @throws IOException
+   */
+  @Idempotent
+  @ReadOnly
+  void msync() throws IOException;
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index b843e29..60b98fb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -152,6 +152,8 @@ import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSa
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCacheDirectiveRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MsyncRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MsyncResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
@@ -1880,4 +1882,13 @@ public class ClientNamenodeProtocolTranslatorPB 
implements
 }
   }
 
+  @Override
+  public void msync() throws IOException {
+MsyncRequestProto.Builder req = MsyncRequestProto.newBuilder();
+try {
+  rpcProxy.msync(null, req.build());
+} catch (ServiceException e) {
+  throw ProtobufHelper.getRemoteException(e);
+  

[hadoop] 11/50: HDFS-12976. [SBN read] Introduce ObserverReadProxyProvider. Contributed by Chao Sun.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 13e86926a5a313bca098e272b5fa559b1680bee2
Author: Chao Sun 
AuthorDate: Wed Jul 11 16:01:05 2018 -0700

HDFS-12976. [SBN read] Introduce ObserverReadProxyProvider. Contributed by 
Chao Sun.
---
 .../hadoop/io/retry/RetryInvocationHandler.java|   5 +
 .../org/apache/hadoop/hdfs/ClientGSIContext.java   |   2 +-
 .../namenode/ha/ObserverReadProxyProvider.java | 273 +
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java |   6 +
 .../hdfs/TestStateAlignmentContextWithHA.java  |   2 +-
 .../hadoop/hdfs/server/namenode/ha/HATestUtil.java |  12 +
 .../hdfs/server/namenode/ha/TestObserverNode.java  | 337 +
 7 files changed, 635 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
index 9f01c39..64824a1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
@@ -448,4 +448,9 @@ public class RetryInvocationHandler implements 
RpcInvocationHandler {
   public ConnectionId getConnectionId() {
 return RPC.getConnectionIdForProxy(proxyDescriptor.getProxy());
   }
+
+  @VisibleForTesting
+  public FailoverProxyProvider getProxyProvider() {
+return proxyDescriptor.fpp;
+  }
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientGSIContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientGSIContext.java
index 6f69eed..241ec05 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientGSIContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientGSIContext.java
@@ -34,7 +34,7 @@ import java.util.concurrent.atomic.LongAccumulator;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Stable
-class ClientGSIContext implements AlignmentContext {
+public class ClientGSIContext implements AlignmentContext {
 
   private final LongAccumulator lastSeenStateId =
   new LongAccumulator(Math::max, Long.MIN_VALUE);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
new file mode 100644
index 000..25035ab
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -0,0 +1,273 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.ha;
+
+import java.io.IOException;
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.ClientGSIContext;
+import org.apache.hadoop.hdfs.NameNodeProxiesClient;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.ipc.AlignmentContext;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A {@link org.apache.hadoop.io.retry.FailoverProxyProvider} implementation
+ * that supports reading from observer namenode(s).
+ *
+ * This constructs a wrapper proxy that sends the request to observer
+ * namenode(s), if observer read is enabled. 
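
A drastically simplified sketch of the dispatch idea this javadoc describes:
route read-only calls to an observer proxy and everything else to the active.
The real provider also handles retries, msync, and HA-state probing;
isReadCall() below stands in for its @ReadOnly annotation check:

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;

final class ReadRoutingHandler<T> implements InvocationHandler {
  private final T observerProxy; // serves read-only calls
  private final T activeProxy;   // serves writes and coordinated calls

  ReadRoutingHandler(T observerProxy, T activeProxy) {
    this.observerProxy = observerProxy;
    this.activeProxy = activeProxy;
  }

  // Placeholder: the real provider inspects the @ReadOnly annotation on
  // ClientProtocol methods instead of guessing from the method name.
  private static boolean isReadCall(Method method) {
    return method.getName().startsWith("get")
        || method.getName().startsWith("list");
  }

  @Override
  public Object invoke(Object proxy, Method method, Object[] args)
      throws Throwable {
    T target = isReadCall(method) ? observerProxy : activeProxy;
    try {
      return method.invoke(target, args);
    } catch (InvocationTargetException ite) {
      throw ite.getCause(); // surface the underlying remote exception
    }
  }

  @SuppressWarnings("unchecked")
  static <T> T wrap(Class<T> iface, T observer, T active) {
    return (T) Proxy.newProxyInstance(iface.getClassLoader(),
        new Class<?>[] {iface}, new ReadRoutingHandler<>(observer, active));
  }
}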

[hadoop] 17/50: HDFS-13851. Remove AlignmentContext from AbstractNNFailoverProxyProvider. Contributed by Konstantin Shvachko.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit ad80383b341f00b81d2cf0f7d607406a3e8de7e9
Author: Konstantin V Shvachko 
AuthorDate: Thu Aug 23 18:00:19 2018 -0700

HDFS-13851. Remove AlignmentContext from AbstractNNFailoverProxyProvider. 
Contributed by Konstantin Shvachko.
---
 .../hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java   | 7 +--
 .../hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java  | 1 -
 .../org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java| 5 -
 3 files changed, 1 insertion(+), 12 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
index 1cf00cf..252b70d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtilClient;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.io.retry.FailoverProxyProvider;
-import org.apache.hadoop.ipc.AlignmentContext;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -107,11 +106,7 @@ public abstract class AbstractNNFailoverProxyProvider 
implements
 return fallbackToSimpleAuth;
   }
 
-  public synchronized AlignmentContext getAlignmentContext() {
-return null; // by default the context is null
-  }
-
-  /**
+  /**
* ProxyInfo to a NameNode. Includes its address.
*/
   public static class NNProxyInfo extends ProxyInfo {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
index 25035ab..754fea4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -113,7 +113,6 @@ public class ObserverReadProxyProvider
 }
   }
 
-  @Override
   public synchronized AlignmentContext getAlignmentContext() {
 return alignmentContext;
   }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
index ae82881..1acbd75 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
@@ -94,11 +94,6 @@ public class TestStateAlignmentContextWithHA {
 
   AC_LIST.add(alignmentContext);
 }
-
-@Override // AbstractNNFailoverProxyProvider
-public synchronized ClientGSIContext getAlignmentContext() {
-  return this.alignmentContext;
-}
   }
 
   static class SpyConfiguredContextProxyProvider


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 30/50: HDFS-14035. NN status discovery does not leverage delegation token. Contributed by Chen Liang.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 683daedc1f980cf2cec93a3be4c8ab50a76126fa
Author: Chen Liang 
AuthorDate: Wed Nov 14 13:32:13 2018 -0800

HDFS-14035. NN status discovery does not leverage delegation token. 
Contributed by Chen Liang.
---
 .../java/org/apache/hadoop/hdfs/DFSClient.java | 13 +
 .../apache/hadoop/hdfs/NameNodeProxiesClient.java  | 31 
 .../hadoop/hdfs/protocol/ClientProtocol.java   | 13 -
 .../ClientNamenodeProtocolTranslatorPB.java| 27 ++
 .../ha/AbstractNNFailoverProxyProvider.java| 33 ++---
 .../namenode/ha/IPFailoverProxyProvider.java   |  2 +-
 .../namenode/ha/ObserverReadProxyProvider.java |  9 +++-
 .../src/main/proto/ClientNamenodeProtocol.proto| 10 
 .../apache/hadoop/hdfs/protocol/TestReadOnly.java  |  3 +-
 .../server/federation/router/RouterRpcServer.java  |  9 +++-
 ...ientNamenodeProtocolServerSideTranslatorPB.java | 36 ++
 .../hdfs/server/namenode/NameNodeRpcServer.java|  6 +++
 .../namenode/ha/TestConsistentReadsObserver.java   |  3 ++
 .../namenode/ha/TestObserverReadProxyProvider.java | 57 +-
 14 files changed, 152 insertions(+), 100 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 4f708a5..d9d6f42 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -91,6 +91,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsCreateModes;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
@@ -3056,4 +3057,16 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   public void msync() throws IOException {
 namenode.msync();
   }
+
+  /**
+   * A non-blocking call to get the HA service state of the NameNode.
+   *
+   * @return HA state of NameNode
+   * @throws IOException
+   */
+  @VisibleForTesting
+  public HAServiceProtocol.HAServiceState getHAServiceState()
+  throws IOException {
+return namenode.getHAServiceState();
+  }
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
index b71e84d..93227bd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
@@ -25,16 +25,13 @@ import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ha.HAServiceProtocol;
-import org.apache.hadoop.ha.protocolPB.HAServiceProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.ha.ClientHAProxyFactory;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAProxyFactory;
 import org.apache.hadoop.ipc.AlignmentContext;
-import org.apache.hadoop.ipc.Client;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -353,34 +350,6 @@ public class NameNodeProxiesClient {
 fallbackToSimpleAuth, null);
   }
 
-  /**
-   * Creates a non-HA proxy object with {@link HAServiceProtocol} to the
-   * given NameNode address, using the provided configuration. The proxy will
-   * use the RPC timeout configuration specified via {@link
-   * org.apache.hadoop.fs.CommonConfigurationKeys#IPC_CLIENT_RPC_TIMEOUT_KEY}.
-   * Upon failures, this will retry up to certain times with {@link 
RetryProxy}.
-   *
-   * @param address the NameNode address
-   * @param conf the configuration to be used
-   * @return a non-HA proxy with {@link HAServiceProtocol}.
-   */
-  public static HAServiceProtocol createNonHAProxyWithHAServiceProtocol(
-  InetSocketAddress address, Configuration conf) throws IOException {
-RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(
-MAX_RETRIES, DELAY_MILLISECONDS, TimeUnit.MILLISECONDS);
-
-HAServiceProtocol proxy =
-new HAServiceProtocolClientSideTranslatorPB(
-address, conf, NetUtils.getDefaultSocketFactory(conf),
-   

[hadoop] 35/50: HDFS-14131. [SBN read] Create user guide for Consistent Reads from Observer feature. Contributed by Chao Sun.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit b8df864ab0e562fa0f3e498877afb65e8423
Author: Erik Krogen 
AuthorDate: Tue Dec 11 12:22:12 2018 -0800

HDFS-14131. [SBN read] Create user guide for Consistent Reads from Observer 
feature. Contributed by Chao Sun.
---
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md  |   2 +
 .../src/site/markdown/ObserverNameNode.md  | 173 +
 2 files changed, 175 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 1fe445b..9c72528 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -500,6 +500,7 @@ Usage:
 
 hdfs haadmin -transitionToActive  [--forceactive]
 hdfs haadmin -transitionToStandby 
+hdfs haadmin -transitionToObserver 
 hdfs haadmin -failover [--forcefence] [--forceactive]  

 hdfs haadmin -getServiceState 
 hdfs haadmin -getAllServiceState
@@ -515,6 +516,7 @@ Usage:
 | `-getAllServiceState` | returns the state of all the NameNodes | |
 | `-transitionToActive` | transition the state of the given NameNode to Active 
(Warning: No fencing is done) |
 | `-transitionToStandby` | transition the state of the given NameNode to 
Standby (Warning: No fencing is done) |
+| `-transitionToObserver` | transition the state of the given NameNode to 
Observer (Warning: No fencing is done) |
 | `-help` [cmd] | Displays help for the given command or all commands if none 
is specified. |
 
 See [HDFS HA with 
NFS](./HDFSHighAvailabilityWithNFS.html#Administrative_commands) or [HDFS HA 
with QJM](./HDFSHighAvailabilityWithQJM.html#Administrative_commands) for more 
information on this command.
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ObserverNameNode.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ObserverNameNode.md
new file mode 100644
index 000..2548315
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ObserverNameNode.md
@@ -0,0 +1,173 @@
+
+
+Consistent Reads from HDFS Observer NameNode
+=
+
+
+
+Purpose
+
+
+This guide provides an overview of the HDFS Observer NameNode feature
+and how to configure/install it in a typical HA-enabled cluster. For a
+detailed technical design overview, please check the doc attached to
+HDFS-12943.
+
+Background
+---
+
+In an HA-enabled HDFS cluster (for more information, check
+[HDFSHighAvailabilityWithQJM](./HDFSHighAvailabilityWithQJM.md)), there
+is a single Active NameNode and one or more Standby NameNodes. The
+Active NameNode is responsible for serving all client requests, while
+the Standby NameNodes just keep up-to-date information regarding the
+namespace, by tailing edit logs from JournalNodes, as well as block
+location information, by receiving block reports from all the DataNodes.
+One drawback of this architecture is that the Active NameNode can be a
+single bottleneck and become overloaded with client requests, especially
+in a busy cluster.
+
+The Consistent Reads from HDFS Observer NameNode feature addresses the
+above by introducing a new type of NameNode called **Observer
+NameNode**. Similar to a Standby NameNode, an Observer NameNode keeps
+itself up-to-date regarding the namespace and block location information.
+In addition, it also has the ability to serve consistent reads, like the
+Active NameNode. Since read requests are the majority in a typical
+environment, this can help to load balance the NameNode traffic and
+improve overall throughput.
+
+Architecture
+--
+
+In the new architecture, an HA cluster can consist of NameNodes in three
+different states: active, standby and observer. State transitions can
+happen between active and standby, and between standby and observer, but
+not directly between active and observer.
+
+To ensure read-after-write consistency within a single client, a state
+ID, which is implemented using the transaction ID within the NameNode, is
+introduced in RPC headers. When a client performs a write through the
+Active NameNode, it updates its state ID using the latest transaction ID
+from the NameNode. When performing a subsequent read, the client passes
+this state ID to the Observer NameNode, which will then check against its
+own transaction ID, and will ensure its own transaction ID has caught up
+with the request's state ID before serving the read request.
+
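
A minimal sketch of the client-side bookkeeping this paragraph
describes, assuming a single monotonically increasing state ID (the
class and method names are illustrative, not the actual client API):

    import java.util.concurrent.atomic.AtomicLong;

    // Remember the highest transaction ID seen in any response, and
    // attach it to later requests.
    class ClientStateId {
      private final AtomicLong lastSeenStateId =
          new AtomicLong(Long.MIN_VALUE);

      // Called when a response header arrives from the NameNode.
      void receiveResponseState(long serverStateId) {
        lastSeenStateId.accumulateAndGet(serverStateId, Math::max);
      }

      // Called when building the next request header; an Observer
      // NameNode delays the read until its own applied transaction ID
      // reaches this value.
      long updateRequestState() {
        return lastSeenStateId.get();
      }
    }
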
+Edit log tailing is critical for the Observer NameNode as it directly
+affects the latency between when a transaction is applied on the Active
+NameNode and when it is applied on the Observer NameNode. A new edit
+log tailing mechanism, named "Edit Tailing Fast-Path", is introduced to
+significantly reduce this latency.
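
A rough, hypothetical sketch of the fast path (all names are invented
for the example): try the JournalNodes' in-memory edit cache over RPC
first, and fall back to the slower streaming mechanism on a cache miss.

    import java.io.IOException;
    import java.util.Collections;
    import java.util.List;

    class EditFetcherSketch {
      List<byte[]> selectEdits(long fromTxnId) throws IOException {
        try {
          return fetchViaRpcCache(fromTxnId);      // fast path, low latency
        } catch (IOException cacheMiss) {
          return fetchViaHttpStreaming(fromTxnId); // streaming fallback
        }
      }

      List<byte[]> fetchViaRpcCache(long fromTxnId) throws IOException {
        throw new IOException("cache miss");       // stub for the sketch
      }

      List<byte[]> fetchViaHttpStreaming(long fromTxnId) {
        return Collections.emptyList();            // stub for the sketch
      }
    }
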

[hadoop] 12/50: HDFS-13665. [SBN read] Move RPC response serialization into Server.doResponse(). Contributed by Plamen Jeliazkov.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit ac0cdb0a8d320e3f3e66ab0332a673f4019abb4d
Author: Plamen Jeliazkov 
AuthorDate: Wed Jul 11 16:07:05 2018 -0700

HDFS-13665. [SBN read] Move RPC response serialization into 
Server.doResponse(). Contributed by Plamen Jeliazkov.
---
 .../main/java/org/apache/hadoop/ipc/Server.java| 43 +-
 1 file changed, 17 insertions(+), 26 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 1a774b1..f1dc26b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -836,15 +836,15 @@ public abstract class Server {
 final Writable rpcRequest;// Serialized Rpc request from client
 ByteBuffer rpcResponse;   // the response for this call
 
-private RpcResponseHeaderProto bufferedHeader; // the response header
-private Writable bufferedRv;   // the byte response
+private ResponseParams responseParams; // the response params
+private Writable rv;   // the byte response
 
 RpcCall(RpcCall call) {
   super(call);
   this.connection = call.connection;
   this.rpcRequest = call.rpcRequest;
-  this.bufferedRv = call.bufferedRv;
-  this.bufferedHeader = call.bufferedHeader;
+  this.rv = call.rv;
+  this.responseParams = call.responseParams;
 }
 
 RpcCall(Connection connection, int id) {
@@ -865,12 +865,10 @@ public abstract class Server {
   this.rpcRequest = param;
 }
 
-public void setBufferedHeader(RpcResponseHeaderProto header) {
-  this.bufferedHeader = header;
-}
-
-public void setBufferedRv(Writable rv) {
-  this.bufferedRv = rv;
+void setResponseFields(Writable returnValue,
+   ResponseParams responseParams) {
+  this.rv = returnValue;
+  this.responseParams = responseParams;
 }
 
 @Override
@@ -904,9 +902,7 @@ public abstract class Server {
 populateResponseParamsOnError(e, responseParams);
   }
   if (!isResponseDeferred()) {
-setupResponse(this, responseParams.returnStatus,
-responseParams.detailedErr,
-value, responseParams.errorClass, responseParams.error);
+setResponseFields(value, responseParams);
 sendResponse();
   } else {
 if (LOG.isDebugEnabled()) {
@@ -961,13 +957,11 @@ public abstract class Server {
 setupResponse(call,
 RpcStatusProto.FATAL, RpcErrorCodeProto.ERROR_RPC_SERVER,
 null, t.getClass().getName(), StringUtils.stringifyException(t));
-  } else if (alignmentContext != null) {
-// rebuild response with state context in header
-RpcResponseHeaderProto.Builder responseHeader =
-call.bufferedHeader.toBuilder();
-alignmentContext.updateResponseState(responseHeader);
-RpcResponseHeaderProto builtHeader = responseHeader.build();
-setupResponse(call, builtHeader, call.bufferedRv);
+  } else {
+setupResponse(call, call.responseParams.returnStatus,
+call.responseParams.detailedErr, call.rv,
+call.responseParams.errorClass,
+call.responseParams.error);
   }
   connection.sendResponse(call);
 }
@@ -2951,6 +2945,9 @@ public abstract class Server {
 headerBuilder.setRetryCount(call.retryCount);
 headerBuilder.setStatus(status);
 headerBuilder.setServerIpcVersionNum(CURRENT_VERSION);
+if (alignmentContext != null) {
+  alignmentContext.updateResponseState(headerBuilder);
+}
 
 if (status == RpcStatusProto.SUCCESS) {
   RpcResponseHeaderProto header = headerBuilder.build();
@@ -2977,12 +2974,6 @@ public abstract class Server {
 
   private void setupResponse(RpcCall call,
   RpcResponseHeaderProto header, Writable rv) throws IOException {
-if (alignmentContext != null && call.bufferedHeader == null
-&& call.bufferedRv == null) {
-  call.setBufferedHeader(header);
-  call.setBufferedRv(rv);
-}
-
 final byte[] response;
 if (rv == null || (rv instanceof RpcWritable.ProtobufWrapper)) {
   response = setupResponseForProtobuf(header, rv);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 09/50: HDFS-13609. [SBN read] Edit Tail Fast Path Part 3: NameNode-side changes to support tailing edits via RPC. Contributed by Erik Krogen.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit f847983f2c621d3de2c2ffbcfc1f1e40d83efa4e
Author: Erik Krogen 
AuthorDate: Tue May 22 16:45:26 2018 -0700

HDFS-13609. [SBN read] Edit Tail Fast Path Part 3: NameNode-side changes to 
support tailing edits via RPC. Contributed by Erik Krogen.
---
 .../hadoop/hdfs/qjournal/client/AsyncLogger.java   |   7 ++
 .../hdfs/qjournal/client/AsyncLoggerSet.java   |  14 +++
 .../hdfs/qjournal/client/IPCLoggerChannel.java |  14 +++
 .../hdfs/qjournal/client/QuorumJournalManager.java | 111 +-
 .../server/namenode/EditLogFileInputStream.java|  44 +++
 .../hdfs/server/namenode/ha/EditLogTailer.java |   6 +-
 .../src/main/resources/hdfs-default.xml|   4 +-
 .../qjournal/client/TestQuorumJournalManager.java  | 130 +
 .../client/TestQuorumJournalManagerUnit.java   | 101 +++-
 .../namenode/TestEditLogFileInputStream.java   |  18 +++
 10 files changed, 439 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
index d2b48cc..7230ebc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java
@@ -22,6 +22,7 @@ import java.net.URL;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
+import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto;
 import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto;
 import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
 import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
@@ -106,6 +107,12 @@ interface AsyncLogger {
* Begin a new epoch on the target node.
*/
   public ListenableFuture<NewEpochResponseProto> newEpoch(long epoch);
+
+  /**
+   * Fetch journaled edits from the cache.
+   */
+  public ListenableFuture<GetJournaledEditsResponseProto> getJournaledEdits(
+      long fromTxnId, int maxTransactions);
   
   /**
* Fetch the list of edit logs available on the remote node.
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
index d46c2cf..15e1df6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
@@ -26,6 +26,7 @@ import java.util.concurrent.TimeoutException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto;
+import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto;
 import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
 import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
 import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
@@ -261,6 +262,19 @@ class AsyncLoggerSet {
 return QuorumCall.create(calls);
   }
 
+  public QuorumCall<AsyncLogger, GetJournaledEditsResponseProto>
+      getJournaledEdits(long fromTxnId, int maxTransactions) {
+    Map<AsyncLogger, ListenableFuture<GetJournaledEditsResponseProto>> calls
+        = Maps.newHashMap();
+    for (AsyncLogger logger : loggers) {
+      ListenableFuture<GetJournaledEditsResponseProto> future =
+          logger.getJournaledEdits(fromTxnId, maxTransactions);
+      calls.put(logger, future);
+    }
+    return QuorumCall.create(calls);
+  }
+
   public QuorumCall<AsyncLogger, RemoteEditLogManifest> getEditLogManifest(
       long fromTxnId, boolean inProgressOk) {
+  public ListenableFuture<GetJournaledEditsResponseProto> getJournaledEdits(
+      long fromTxnId, int maxTransactions) {
+    return parallelExecutor.submit(
+        new Callable<GetJournaledEditsResponseProto>() {
+          @Override
+          public GetJournaledEditsResponseProto call() throws IOException {
+            return getProxy().getJournaledEdits(journalId, nameServiceId,
+                fromTxnId, maxTransactions);
+          }
+        });
+  }
+
+  @Override
   public ListenableFuture<RemoteEditLogManifest> getEditLogManifest(
       final long fromTxnId, final boolean inProgressOk) {
     return parallelExecutor.submit(new Callable<RemoteEditLogManifest>() {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
 

[hadoop] 23/50: HDFS-13898. [SBN read] Throw retriable exception for getBlockLocations when ObserverNameNode is in safemode. Contributed by Chao Sun.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 894f095219c6a19f4ba21d4bbeeaa9e94c34468b
Author: Erik Krogen 
AuthorDate: Fri Sep 21 14:57:52 2018 -0700

HDFS-13898. [SBN read] Throw retriable exception for getBlockLocations when 
ObserverNameNode is in safemode. Contributed by Chao Sun.
---
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  5 +-
 .../hdfs/server/namenode/NameNodeAdapter.java  |  7 +++
 .../hdfs/server/namenode/ha/TestObserverNode.java  | 67 ++
 3 files changed, 78 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index d110513..4dfc24a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -88,6 +88,8 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROU
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.*;
+import static org.apache.hadoop.ha.HAServiceProtocol.HAServiceState.ACTIVE;
+import static org.apache.hadoop.ha.HAServiceProtocol.HAServiceState.OBSERVER;
 
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
@@ -1927,7 +1929,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 SafeModeException se = newSafemodeException(
 "Zero blocklocations for " + srcArg);
 if (haEnabled && haContext != null &&
-haContext.getState().getServiceState() == 
HAServiceState.ACTIVE) {
+(haContext.getState().getServiceState() == ACTIVE ||
+haContext.getState().getServiceState() == OBSERVER)) {
   throw new RetriableException(se);
 } else {
   throw se;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
index 4eac7e1..c5ef34e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import static org.mockito.Mockito.spy;
 
@@ -221,6 +222,12 @@ public class NameNodeAdapter {
 return fsnSpy;
   }
 
+  public static BlockManager spyOnBlockManager(NameNode nn) {
+BlockManager bmSpy = Mockito.spy(nn.getNamesystem().getBlockManager());
+nn.getNamesystem().setBlockManagerForTesting(bmSpy);
+return bmSpy;
+  }
+
   public static ReentrantReadWriteLock spyOnFsLock(FSNamesystem fsn) {
 ReentrantReadWriteLock spy = Mockito.spy(fsn.getFsLockForTests());
 fsn.setFsLockForTests(spy);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
index 89bfffb..c9e79fa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
@@ -24,8 +24,15 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.retry.FailoverProxyProvider;
 import org.apache.hadoop.io.retry.RetryInvocationHandler;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -38,9 

[hadoop] 19/50: HDFS-13779. [SBN read] Implement proper failover and observer failure handling logic for ObserverReadProxyProvider. Contributed by Erik Krogen.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 34b05a26acf8854230bce08f507cc0887f0bc332
Author: Erik Krogen 
AuthorDate: Fri Aug 24 05:04:27 2018 -0700

HDFS-13779. [SBN read] Implement proper failover and observer failure 
handling logic for ObserverReadProxyProvider. Contributed by Erik Krogen.
---
 .../ha/AbstractNNFailoverProxyProvider.java|  16 +
 .../namenode/ha/ObserverReadProxyProvider.java | 255 ++--
 .../hdfs/server/namenode/ha/TestObserverNode.java  |  27 +-
 .../namenode/ha/TestObserverReadProxyProvider.java | 335 +
 4 files changed, 532 insertions(+), 101 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
index 252b70d..32edb36 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/AbstractNNFailoverProxyProvider.java
@@ -30,6 +30,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtilClient;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -111,6 +112,12 @@ public abstract class AbstractNNFailoverProxyProvider 
implements
*/
   public static class NNProxyInfo<T> extends ProxyInfo<T> {
 private InetSocketAddress address;
+/**
+ * The currently known state of the NameNode represented by this ProxyInfo.
+ * This may be out of date if the NameNode has changed state since the last
+ * time the state was checked.
+ */
+private HAServiceState cachedState;
 
 public NNProxyInfo(InetSocketAddress address) {
   super(null, address.toString());
@@ -120,6 +127,15 @@ public abstract class AbstractNNFailoverProxyProvider 
implements
 public InetSocketAddress getAddress() {
   return address;
 }
+
+public void setCachedState(HAServiceState state) {
+  cachedState = state;
+}
+
+public HAServiceState getCachedState() {
+  return cachedState;
+}
+
   }
 
   @Override
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
index dcae2db..e819282 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -20,18 +20,24 @@ package org.apache.hadoop.hdfs.server.namenode.ha;
 import java.io.Closeable;
 import java.io.IOException;
 import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.net.URI;
-import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.ClientGSIContext;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.io.retry.AtMostOnce;
+import org.apache.hadoop.io.retry.Idempotent;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
 import org.apache.hadoop.ipc.AlignmentContext;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -59,16 +65,18 @@ public class ObserverReadProxyProvider
   private static final Logger LOG = LoggerFactory.getLogger(
   ObserverReadProxyProvider.class);
 
-  /** Client-side context for syncing with the NameNode server side */
-  private AlignmentContext alignmentContext;
+  /** Client-side context for syncing with the NameNode server side. */
+  private final AlignmentContext alignmentContext;
 
-  private AbstractNNFailoverProxyProvider<T> failoverProxy;
-  /** All NameNode proxies */
-  private List<NNProxyInfo<T>> nameNodeProxies =
-      new ArrayList<NNProxyInfo<T>>();
-  /** Proxies for the observer namenodes */
-  private final List<NNProxyInfo<T>> observerProxies =
-   
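
The diff above is truncated in the archive. As a rough, simplified
illustration of the failure handling this commit adds (method names are
assumed, not the real code), the invocation handler tries each observer
in turn for a read and falls back to the active proxy on failure:

    // Simplified, illustrative dispatch for a coordinated read.
    Object invokeRead(Method method, Object[] args) throws Throwable {
      for (NNProxyInfo<T> observer : observerProxies) {
        try {
          return method.invoke(observer.proxy, args);
        } catch (Exception e) {
          LOG.warn("Invocation on observer " + observer.proxyInfo
              + " failed, trying next", e);
        }
      }
      // No observer could serve the read; let the active handle it.
      return method.invoke(failoverProxy.getProxy().proxy, args);
    }
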

[hadoop] 27/50: HDFS-13925. Unit Test for transitioning between different states. Contributed by Sherwood Zheng.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit ca565efe2f4250768ea0ba012024d76c30d98521
Author: Konstantin V Shvachko 
AuthorDate: Tue Oct 16 16:35:02 2018 -0700

HDFS-13925. Unit Test for transitioning between different states. 
Contributed by Sherwood Zheng.
---
 .../hdfs/server/namenode/ha/TestObserverNode.java  | 23 ++
 1 file changed, 23 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
index 28fd330..b18c5b8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestObserverNode.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode.ha;
 import static 
org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getServiceState;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyLong;
@@ -30,11 +31,13 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.List;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -100,6 +103,26 @@ public class TestObserverNode {
   }
 
   @Test
+  public void testNoActiveToObserver() throws Exception {
+try {
+  dfsCluster.transitionToObserver(0);
+} catch (ServiceFailedException e) {
+  return;
+}
+fail("active cannot be transitioned to observer");
+  }
+
+  @Test
+  public void testNoObserverToActive() throws Exception {
+try {
+  dfsCluster.transitionToActive(2);
+} catch (ServiceFailedException e) {
+  return;
+}
+fail("observer cannot be transitioned to active");
+  }
+
+  @Test
   public void testSimpleRead() throws Exception {
 Path testPath2 = new Path(testPath, "test2");
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 25/50: HDFS-13961. [SBN read] TestObserverNode refactoring. Contributed by Konstantin Shvachko.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 28b11b03d7c9b6b99767421231cf757505004414
Author: Konstantin V Shvachko 
AuthorDate: Fri Oct 5 15:03:38 2018 -0700

HDFS-13961. [SBN read] TestObserverNode refactoring. Contributed by 
Konstantin Shvachko.
---
 .../hdfs/server/namenode/ha/EditLogTailer.java |   2 +-
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java |  10 +-
 .../hdfs/TestStateAlignmentContextWithHA.java  |   6 +-
 .../hadoop/hdfs/qjournal/MiniQJMHACluster.java |   4 +-
 .../hdfs/server/namenode/NameNodeAdapter.java  |   6 +
 .../hadoop/hdfs/server/namenode/ha/HATestUtil.java |  83 -
 .../namenode/ha/TestConsistentReadsObserver.java   | 182 ++
 .../server/namenode/ha/TestMultiObserverNode.java  | 155 +
 .../hdfs/server/namenode/ha/TestObserverNode.java  | 387 +
 9 files changed, 516 insertions(+), 319 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index cd797fd..84048b0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -290,7 +290,7 @@ public class EditLogTailer {
   }
   
   @VisibleForTesting
-  void doTailEdits() throws IOException, InterruptedException {
+  public void doTailEdits() throws IOException, InterruptedException {
 // Write lock needs to be interruptible here because the 
 // transitionToActive RPC takes the write lock before calling
 // tailer.stop() -- so if we're not interruptible, it will
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index a690198..073e0f9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -2579,8 +2579,14 @@ public class MiniDFSCluster implements AutoCloseable {
 getNameNode(nnIndex).getRpcServer().transitionToObserver(
 new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER_FORCED));
   }
-  
-  
+
+  public void rollEditLogAndTail(int nnIndex) throws Exception {
+getNameNode(nnIndex).getRpcServer().rollEditLog();
+for (int i = 2; i < getNumNameNodes(); i++) {
+  getNameNode(i).getNamesystem().getEditLogTailer().doTailEdits();
+}
+  }
+
   public void triggerBlockReports()
   throws IOException {
 for (DataNode dn : getDataNodes()) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
index a494252..a642872 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
@@ -100,10 +100,8 @@ public class TestStateAlignmentContextWithHA {
 cluster.transitionToActive(0);
 cluster.transitionToObserver(2);
 
-String nameservice = HATestUtil.getLogicalHostname(cluster);
-HATestUtil.setFailoverConfigurations(cluster, CONF, nameservice, 0);
-CONF.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX +
-"." + nameservice, ORPPwithAlignmentContexts.class.getName());
+HATestUtil.setupHAConfiguration(
+cluster, CONF, 0, ORPPwithAlignmentContexts.class);
   }
 
   @Before
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
index f1f74dc..a413667 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import 
org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 
 import java.io.IOException;
@@ -170,7 +171,8 @@ public class MiniQJMHACluster {
 }
 
 // use standard failover configurations
-

[hadoop] 18/50: HDFS-13782. ObserverReadProxyProvider should work with IPFailoverProxyProvider. Contributed by Konstantin Shvachko.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit be8beda8dc7b186b806149f07bbe18389ebe5b0d
Author: Konstantin V Shvachko 
AuthorDate: Sat Aug 25 17:32:30 2018 -0700

HDFS-13782. ObserverReadProxyProvider should work with 
IPFailoverProxyProvider. Contributed by Konstantin Shvachko.
---
 .../namenode/ha/ObserverReadProxyProvider.java | 105 +
 .../ObserverReadProxyProviderWithIPFailover.java   |  40 
 2 files changed, 108 insertions(+), 37 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
index 754fea4..dcae2db 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -17,30 +17,30 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
+import java.io.Closeable;
 import java.io.IOException;
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.net.URI;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.ClientGSIContext;
-import org.apache.hadoop.hdfs.NameNodeProxiesClient;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.ipc.AlignmentContext;
+import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * A {@link org.apache.hadoop.io.retry.FailoverProxyProvider} implementation
  * that supports reading from observer namenode(s).
@@ -55,16 +55,20 @@ import org.slf4j.LoggerFactory;
  * observer is turned off.
  */
 public class ObserverReadProxyProvider<T extends ClientProtocol>
-    extends ConfiguredFailoverProxyProvider<T> {
+    extends AbstractNNFailoverProxyProvider<T> {
   private static final Logger LOG = LoggerFactory.getLogger(
   ObserverReadProxyProvider.class);
 
   /** Client-side context for syncing with the NameNode server side */
   private AlignmentContext alignmentContext;
 
+  private AbstractNNFailoverProxyProvider<T> failoverProxy;
+  /** All NameNode proxies */
+  private List<NNProxyInfo<T>> nameNodeProxies =
+      new ArrayList<NNProxyInfo<T>>();
   /** Proxies for the observer namenodes */
-  private final List<AddressRpcProxyPair<T>> observerProxies =
-      new ArrayList<>();
+  private final List<NNProxyInfo<T>> observerProxies =
+      new ArrayList<NNProxyInfo<T>>();
 
   /**
* Whether reading from observer is enabled. If this is false, all read
@@ -81,36 +85,43 @@ public class ObserverReadProxyProvider
   /** The last proxy that has been used. Only used for testing */
   private volatile ProxyInfo<T> lastProxy = null;
 
-  @SuppressWarnings("unchecked")
+  /**
+   * By default ObserverReadProxyProvider uses
+   * {@link ConfiguredFailoverProxyProvider} for failover.
+   */
   public ObserverReadProxyProvider(
       Configuration conf, URI uri, Class<T> xface, HAProxyFactory<T> factory)
       throws IOException {
+    this(conf, uri, xface, factory,
+        new ConfiguredFailoverProxyProvider<>(conf, uri, xface, factory));
+  }
+
+  public ObserverReadProxyProvider(
+      Configuration conf, URI uri, Class<T> xface, HAProxyFactory<T> factory,
+      AbstractNNFailoverProxyProvider<T> failoverProxy)
+      throws IOException {
 super(conf, uri, xface, factory);
-alignmentContext = new ClientGSIContext();
+this.failoverProxy = failoverProxy;
+this.alignmentContext = new ClientGSIContext();
     ((ClientHAProxyFactory<T>) factory).setAlignmentContext(alignmentContext);
 
+    // Get all NameNode proxies
+    nameNodeProxies = getProxyAddresses(uri,
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
     // Find out all the observer proxies
-    for (AddressRpcProxyPair<T> ap : this.proxies) {
-      ap.namenode = (T) NameNodeProxiesClient.createProxyWithAlignmentContext(
-          ap.address, conf, ugi, false, getFallbackToSimpleAuth(),
-          alignmentContext);
-      if (isObserverState(ap)) {
-        observerProxies.add(ap);
+    for (NNProxyInfo<T> pi : nameNodeProxies) {
+      createProxyIfNeeded(pi);
+      if (isObserverState(pi)) {
+        observerProxies.add(pi);
       }
     }
 
+// TODO: No observers is not an error
+// 

[hadoop] 29/50: HDFS-14016. [SBN read] ObserverReadProxyProvider should enable observer read by default. Contributed by Chen Liang.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 4ce7f9f2e681a23be79d589bc67776974601bcae
Author: Chao Sun 
AuthorDate: Wed Oct 31 11:22:02 2018 -0700

HDFS-14016. [SBN read] ObserverReadProxyProvider should enable observer 
read by default. Contributed by Chen Liang.
---
 .../hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java   | 2 ++
 1 file changed, 2 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
index 87ca718..17bad65 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -141,6 +141,8 @@ public class ObserverReadProxyProvider
 ObserverReadInvocationHandler.class.getClassLoader(),
     new Class<?>[] { xface }, new ObserverReadInvocationHandler());
 combinedProxy = new ProxyInfo<>(wrappedProxy, combinedInfo.toString());
+// TODO : make this configurable or remove this variable
+this.observerReadEnabled = true;
   }
 
   public AlignmentContext getAlignmentContext() {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 08/50: HDFS-13608. [SBN read] Edit Tail Fast Path Part 2: Add ability for JournalNode to serve edits via RPC. Contributed by Erik Krogen.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit db9cec6ad2745cdd42ca0c614fb28b367ca080a3
Author: Erik Krogen 
AuthorDate: Wed May 23 12:42:13 2018 -0700

HDFS-13608. [SBN read] Edit Tail Fast Path Part 2: Add ability for 
JournalNode to serve edits via RPC. Contributed by Erik Krogen.
---
 .../hadoop-common/src/site/markdown/Metrics.md |  5 ++
 .../hdfs/qjournal/protocol/QJournalProtocol.java   | 24 -
 .../QJournalProtocolServerSideTranslatorPB.java| 14 +
 .../protocolPB/QJournalProtocolTranslatorPB.java   | 20 
 .../hadoop/hdfs/qjournal/server/Journal.java   | 59 ++
 .../hdfs/qjournal/server/JournalMetrics.java   | 20 +++-
 .../hdfs/qjournal/server/JournalNodeRpcServer.java |  8 +++
 .../src/main/proto/QJournalProtocol.proto  | 18 +++
 .../hadoop/hdfs/qjournal/server/TestJournal.java   | 46 +
 9 files changed, 212 insertions(+), 2 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index c6ca2a7..8e107ef 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -299,6 +299,11 @@ The server-side metrics for a journal from the 
JournalNode's perspective. Each m
 | `LastWrittenTxId` | The highest transaction id stored on this JournalNode |
 | `LastPromisedEpoch` | The last epoch number which this node has promised not 
to accept any lower epoch, or 0 if no promises have been made |
 | `LastJournalTimestamp` | The timestamp of last successfully written 
transaction |
+| `TxnsServedViaRpc` | Number of transactions served via the RPC mechanism |
+| `BytesServedViaRpc` | Number of bytes served via the RPC mechanism |
+| `RpcRequestCacheMissAmountNumMisses` | Number of RPC requests which could 
not be served due to lack of data in the cache |
+| `RpcRequestCacheMissAmountAvgTxns` | The average number of transactions by 
which a request missed the cache; for example if transaction ID 10 is requested 
and the cache's oldest transaction is ID 15, value 5 will be added to this 
average |
+| `RpcEmptyResponses` | Number of RPC requests with zero edits returned |
 
 datanode
 
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java
index 5558bd5..c002796 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager;
 import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto;
+import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournaledEditsResponseProto;
 import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto;
 import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto;
 import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
@@ -139,7 +140,28 @@ public interface QJournalProtocol {
  long sinceTxId,
  boolean inProgressOk)
   throws IOException;
-  
+
+  /**
+   * Fetch edit logs present in the Journal's in-memory cache of edits
+   * ({@link org.apache.hadoop.hdfs.qjournal.server.JournaledEditsCache}).
+   * To enable this cache, in-progress edit log tailing must be enabled via the
+   * {@value DFSConfigKeys#DFS_HA_TAILEDITS_INPROGRESS_KEY} configuration key.
+   *
+   * @param jid The ID of the journal from which to fetch edits.
+   * @param nameServiceId The ID of the namespace for which to fetch edits.
+   * @param sinceTxId Fetch edits starting at this transaction ID
+   * @param maxTxns Request at most this many transactions to be returned
+   * @throws IOException If there was an issue encountered while fetching edits
+   * from the cache, including a cache miss (cache does not contain the
+   * requested edits). The caller should then attempt to fetch the edits 
via
+   * the streaming mechanism (starting with
+   * {@link #getEditLogManifest(String, String, long, boolean)}).
+   * @return Response containing serialized edits to be loaded
+   * @see org.apache.hadoop.hdfs.qjournal.server.JournaledEditsCache
+   */
+  
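
The method signature is cut off above; going by the javadoc, a
caller-side sketch looks roughly like this (parameter names are taken
from the javadoc, and applyEdits is a hypothetical helper):

    try {
      GetJournaledEditsResponseProto resp =
          proxy.getJournaledEdits(jid, nameServiceId, sinceTxId, maxTxns);
      applyEdits(resp); // hypothetical helper for this sketch
    } catch (IOException e) {
      // Cache miss: fall back to getEditLogManifest(...) and the
      // streaming mechanism, as the javadoc describes.
    }
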

[hadoop] 24/50: HDFS-13791. Limit logging frequency of edit tail related statements. Contributed by Erik Krogen.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 56af83c961c8a4712964623db9e461141b3d9c3d
Author: Chen Liang 
AuthorDate: Thu Sep 27 10:12:37 2018 -0700

HDFS-13791. Limit logging frequency of edit tail related statements. 
Contributed by Erik Krogen.
---
 .../org/apache/hadoop/log/LogThrottlingHelper.java | 34 +++
 .../hdfs/qjournal/client/QuorumJournalManager.java | 15 ++-
 .../hdfs/server/namenode/FSEditLogLoader.java  | 47 +
 .../hadoop/hdfs/server/namenode/FSImage.java   | 19 -
 .../namenode/RedundantEditLogInputStream.java  | 15 ++-
 .../hdfs/server/namenode/TestFSEditLogLoader.java  | 48 ++
 6 files changed, 163 insertions(+), 15 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogThrottlingHelper.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogThrottlingHelper.java
index aa4e61c..591c3fb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogThrottlingHelper.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogThrottlingHelper.java
@@ -273,6 +273,40 @@ public class LogThrottlingHelper {
   }
 
   /**
+   * Return the summary information for given index.
+   *
+   * @param recorderName The name of the recorder.
+   * @param idx The index value.
+   * @return The summary information.
+   */
+  public SummaryStatistics getCurrentStats(String recorderName, int idx) {
+LoggingAction currentLog = currentLogs.get(recorderName);
+if (currentLog != null) {
+  return currentLog.getStats(idx);
+}
+
+return null;
+  }
+
+  /**
+   * Helper function to create a message about how many log statements were
+   * suppressed in the provided log action. If no statements were suppressed,
+   * this returns an empty string. The message has the format (without quotes):
+   *
+   * ' (suppressed logging {suppression_count} times)'
+   *
+   * @param action The log action to produce a message about.
+   * @return A message about suppression within this action.
+   */
+  public static String getLogSupressionMessage(LogAction action) {
+if (action.getCount() > 1) {
+  return " (suppressed logging " + (action.getCount() - 1) + " times)";
+} else {
+  return "";
+}
+  }
+
+  /**
* A standard log action which keeps track of all of the values which have
* been logged. This is also used for internal bookkeeping via its private
* fields and methods; it will maintain whether or not it is ready to be
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index f4177e3..5f6d45f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@ -54,6 +54,8 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
+import org.apache.hadoop.log.LogThrottlingHelper;
+import org.apache.hadoop.log.LogThrottlingHelper.LogAction;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
@@ -105,6 +107,11 @@ public class QuorumJournalManager implements 
JournalManager {
   private int outputBufferCapacity = 512 * 1024;
   private final URLConnectionFactory connectionFactory;
 
+  /** Limit logging about input stream selection to every 5 seconds max. */
+  private static final long SELECT_INPUT_STREAM_LOG_INTERVAL_MS = 5000;
+  private final LogThrottlingHelper selectInputStreamLogHelper =
+  new LogThrottlingHelper(SELECT_INPUT_STREAM_LOG_INTERVAL_MS);
+
   @VisibleForTesting
   public QuorumJournalManager(Configuration conf,
   URI uri,
@@ -568,8 +575,12 @@ public class QuorumJournalManager implements 
JournalManager {
   "ID " + fromTxnId);
   return;
 }
-LOG.info("Selected loggers with >= " + maxAllowedTxns +
-" transactions starting from " + fromTxnId);
+LogAction logAction = selectInputStreamLogHelper.record(fromTxnId);
+if (logAction.shouldLog()) {
+  LOG.info("Selected loggers with >= " + maxAllowedTxns + " transactions " 
+
+  "starting from lowest txn ID " + logAction.getStats(0).getMin() +
+  LogThrottlingHelper.getLogSupressionMessage(logAction));
+}
    PriorityQueue<EditLogInputStream> allStreams = new PriorityQueue<>(
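
For reference, the usage pattern introduced above generalizes as in the
following sketch (the interval and message text are illustrative):

    // Record every event, but only emit a log line once per throttling
    // interval, summarizing whatever was suppressed in between.
    private final LogThrottlingHelper helper = new LogThrottlingHelper(5000);

    void onBatch(long firstTxnId) {
      LogAction action = helper.record(firstTxnId);
      if (action.shouldLog()) {
        LOG.info("Processed batches starting at txn ID "
            + (long) action.getStats(0).getMin()
            + LogThrottlingHelper.getLogSupressionMessage(action));
      }
    }
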
 

[hadoop] 07/50: HDFS-13607. [SBN read] Edit Tail Fast Path Part 1: Enhance JournalNode with an in-memory cache of recent edit transactions. Contributed by Erik Krogen.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit eacf43252a7065eeb448e2c67a73be565641c33d
Author: Erik Krogen 
AuthorDate: Wed May 9 15:40:07 2018 -0700

HDFS-13607. [SBN read] Edit Tail Fast Path Part 1: Enhance JournalNode with 
an in-memory cache of recent edit transactions. Contributed by Erik Krogen.
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |   3 +
 .../hdfs/qjournal/server/JournaledEditsCache.java  | 393 +
 .../src/main/resources/hdfs-default.xml|  12 +
 .../qjournal/server/TestJournaledEditsCache.java   | 257 ++
 4 files changed, 665 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 7520069..5e284db 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -978,6 +978,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_JOURNALNODE_SYNC_INTERVAL_KEY =
   "dfs.journalnode.sync.interval";
   public static final long DFS_JOURNALNODE_SYNC_INTERVAL_DEFAULT = 2*60*1000L;
+  public static final String DFS_JOURNALNODE_EDIT_CACHE_SIZE_KEY =
+  "dfs.journalnode.edit-cache-size.bytes";
+  public static final int DFS_JOURNALNODE_EDIT_CACHE_SIZE_DEFAULT = 1024 * 
1024;
 
   // Journal-node related configs for the client side.
   public static final String  DFS_QJOURNAL_QUEUE_SIZE_LIMIT_KEY = 
"dfs.qjournal.queued-edits.limit.mb";
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournaledEditsCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournaledEditsCache.java
new file mode 100644
index 0000000..1151069
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournaledEditsCache.java
@@ -0,0 +1,393 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.qjournal.server;
+
+import com.google.common.annotations.VisibleForTesting;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
+import org.apache.hadoop.util.AutoCloseableLock;
+
+
+/**
+ * An in-memory cache of edits in their serialized form. This is used to serve
+ * the {@link Journal#getJournaledEdits(long, int)} call, used by the
+ * QJM when {@value DFSConfigKeys#DFS_HA_TAILEDITS_INPROGRESS_KEY} is
+ * enabled.
+ *
+ * When a batch of edits is received by the JournalNode, it is put into this
+ * cache via {@link #storeEdits(byte[], long, long, int)}. Edits must be
+ * stored contiguously; if a batch of edits is stored that does not align with
+ * the previously stored edits, the cache will be cleared before storing new
+ * edits to avoid gaps. This decision is made because gaps are only handled
+ * when in recovery mode, which the cache is not intended to be used for.
+ *
+ * Batches of edits are stored in a {@link TreeMap} mapping the starting
+ * transaction ID of the batch to the data buffer. Upon retrieval, the
+ * relevant data buffers are concatenated together and a header is added
+ * to construct a fully-formed edit data stream.
+ *
+ * The cache is of a limited size capacity determined by
+ * 
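
The javadoc is truncated here (it goes on to describe the size cap,
configured via dfs.journalnode.edit-cache-size.bytes above). A minimal
sketch of the structure it describes, with locking and eviction omitted
and names simplified:

    import java.util.NavigableMap;
    import java.util.TreeMap;

    // Batches of serialized edits keyed by their starting txn ID; a
    // non-contiguous batch clears the cache, since gaps are only
    // handled in recovery mode.
    class EditsCacheSketch {
      private final NavigableMap<Long, byte[]> dataMap = new TreeMap<>();
      private long highestTxnId = -1; // sentinel: nothing cached yet

      synchronized void storeEdits(byte[] data, long firstTxnId,
          long lastTxnId) {
        if (highestTxnId >= 0 && firstTxnId != highestTxnId + 1) {
          dataMap.clear(); // gap detected; reset to avoid serving holes
        }
        dataMap.put(firstTxnId, data);
        highestTxnId = lastTxnId;
      }
    }
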

[hadoop] 20/50: HDFS-13880. Add mechanism to allow certain RPC calls to bypass sync. Contributed by Chen Liang.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 613c9e4f7b797b18072f488e1957e00ab7a0f2e8
Author: Chen Liang 
AuthorDate: Thu Sep 13 16:22:37 2018 -0700

HDFS-13880. Add mechanism to allow certain RPC calls to bypass sync. 
Contributed by Chen Liang.
---
 .../org/apache/hadoop/ipc/AlignmentContext.java| 16 ++
 .../main/java/org/apache/hadoop/ipc/Server.java| 42 --
 .../org/apache/hadoop/hdfs/ClientGSIContext.java   |  6 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java   | 64 +++---
 .../hadoop/hdfs/server/namenode/ha/ReadOnly.java   |  7 +++
 .../hdfs/server/namenode/GlobalStateIdContext.java | 21 +++
 .../qjournal/client/TestQuorumJournalManager.java  | 19 +--
 .../hdfs/server/namenode/ha/TestObserverNode.java  | 52 ++
 8 files changed, 185 insertions(+), 42 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
index 0e8b960..a435ff6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
@@ -38,6 +38,7 @@ public interface AlignmentContext {
   /**
* This is the intended server method call to implement to pass state info
* during RPC response header construction.
+   *
* @param header The RPC response header builder.
*/
   void updateResponseState(RpcResponseHeaderProto.Builder header);
@@ -45,6 +46,7 @@ public interface AlignmentContext {
   /**
* This is the intended client method call to implement to recieve state info
* during RPC response processing.
+   *
* @param header The RPC response header.
*/
   void receiveResponseState(RpcResponseHeaderProto header);
@@ -52,6 +54,7 @@ public interface AlignmentContext {
   /**
* This is the intended client method call to pull last seen state info
* into RPC request processing.
+   *
* @param header The RPC request header builder.
*/
   void updateRequestState(RpcRequestHeaderProto.Builder header);
@@ -59,6 +62,7 @@ public interface AlignmentContext {
   /**
* This is the intended server method call to implement to receive
* client state info during RPC response header processing.
+   *
* @param header The RPC request header.
* @return state id of in the request header.
*/
@@ -66,7 +70,19 @@ public interface AlignmentContext {
 
   /**
* Returns the last seen state id of the alignment context instance.
+   *
* @return the value of the last seen state id.
*/
   long getLastSeenStateId();
+
+  /**
+   * Return true if this method call does need to be synced, false
+   * otherwise. sync meaning server state needs to have caught up with
+   * client state.
+   *
+   * @param protocolName the name of the protocol
+   * @param method the method call to check
+   * @return true if this method is async, false otherwise.
+   */
+  boolean isCoordinatedCall(String protocolName, String method);
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index bf0d68d..1cbf8b8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -707,6 +707,7 @@ public abstract class Server {
 private int priorityLevel;
 // the priority level assigned by scheduler, 0 by default
 private long clientStateId;
+private boolean isCallCoordinated;
 
 Call() {
   this(RpcConstants.INVALID_CALL_ID, RpcConstants.INVALID_RETRY_COUNT,
@@ -738,6 +739,7 @@ public abstract class Server {
   this.traceScope = traceScope;
   this.callerContext = callerContext;
   this.clientStateId = Long.MIN_VALUE;
+  this.isCallCoordinated = false;
 }
 
 @Override
@@ -823,6 +825,14 @@ public abstract class Server {
   this.clientStateId = stateId;
 }
 
+public void markCallCoordinated(boolean flag) {
+  this.isCallCoordinated = flag;
+}
+
+public boolean isCallCoordinated() {
+  return this.isCallCoordinated;
+}
+
 @InterfaceStability.Unstable
 public void deferResponse() {
   this.deferredResponse = true;
@@ -2521,9 +2531,31 @@ public abstract class Server {
 
   // Save the priority level assignment by the scheduler
   call.setPriorityLevel(callQueue.getPriorityLevel(call));
-  if(alignmentContext != null) {
-long stateId = alignmentContext.receiveRequestState(header);
-call.setClientStateId(stateId);
+  if(alignmentContext != null && 
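
The hunk is cut off above. For context, a server-side AlignmentContext
can implement the new isCoordinatedCall() check by precomputing which
ClientProtocol methods are marked as coordinated reads. This is a hedged
sketch in the spirit of GlobalStateIdContext, not a copy of it; it
assumes the ClientProtocol and ReadOnly types touched by this commit,
plus java.lang.reflect.Method and java.util.HashSet/Set imports:

    // Cache the names of ClientProtocol methods whose @ReadOnly
    // annotation marks them as coordinated, then answer lookups per
    // incoming call.
    class CoordinationCheck {
      private final Set<String> coordinatedMethods = new HashSet<>();

      CoordinationCheck() {
        for (Method method : ClientProtocol.class.getDeclaredMethods()) {
          ReadOnly readOnly = method.getAnnotation(ReadOnly.class);
          if (readOnly != null && readOnly.isCoordinated()) {
            coordinatedMethods.add(method.getName());
          }
        }
      }

      boolean isCoordinatedCall(String protocolName, String methodName) {
        return ClientProtocol.class.getCanonicalName().equals(protocolName)
            && coordinatedMethods.contains(methodName);
      }
    }
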

[hadoop] 15/50: HDFS-13789. Reduce logging frequency of QuorumJournalManager#selectInputStreams. Contributed by Erik Krogen.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit b41997ceacb43727d9eae723b664bb2b4fe797b1
Author: Chao Sun 
AuthorDate: Wed Aug 8 13:09:39 2018 -0700

HDFS-13789. Reduce logging frequency of 
QuorumJournalManager#selectInputStreams. Contributed by Erik Krogen.
---
 .../apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java| 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index a14e6a0..f4177e3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@ -499,8 +499,10 @@ public class QuorumJournalManager implements 
JournalManager {
 // the cache used for RPC calls is not enabled; fall back to using the
 // streaming mechanism to serve such requests
 if (inProgressOk && inProgressTailingEnabled) {
-  LOG.info("Tailing edits starting from txn ID " + fromTxnId +
-  " via RPC mechanism");
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Tailing edits starting from txn ID " + fromTxnId +
+" via RPC mechanism");
+  }
   try {
        Collection<EditLogInputStream> rpcStreams = new ArrayList<>();
 selectRpcInputStreams(rpcStreams, fromTxnId, onlyDurableTxns);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 16/50: HDFS-13767. Add msync server implementation. Contributed by Chen Liang.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 60f0e4df69878fb934074bfca91fb46c3b8b3255
Author: Chen Liang 
AuthorDate: Mon Aug 13 10:30:06 2018 -0700

HDFS-13767. Add msync server implementation. Contributed by Chen Liang.
---
 .../org/apache/hadoop/ipc/AlignmentContext.java|  9 +++-
 .../main/java/org/apache/hadoop/ipc/Server.java| 37 +++--
 .../org/apache/hadoop/hdfs/ClientGSIContext.java   |  6 ++-
 .../ClientNamenodeProtocolTranslatorPB.java|  1 -
 .../hdfs/server/namenode/GlobalStateIdContext.java | 20 +--
 .../hdfs/TestStateAlignmentContextWithHA.java  | 36 -
 .../hdfs/server/namenode/ha/TestObserverNode.java  | 63 ++
 7 files changed, 114 insertions(+), 58 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
index 66d6edc..0e8b960 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
@@ -60,6 +60,13 @@ public interface AlignmentContext {
* This is the intended server method call to implement to receive
* client state info during RPC response header processing.
* @param header The RPC request header.
+   * @return state id of in the request header.
*/
-  void receiveRequestState(RpcRequestHeaderProto header);
+  long receiveRequestState(RpcRequestHeaderProto header);
+
+  /**
+   * Returns the last seen state id of the alignment context instance.
+   * @return the value of the last seen state id.
+   */
+  long getLastSeenStateId();
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index f1dc26b..bf0d68d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -706,6 +706,7 @@ public abstract class Server {
 private boolean deferredResponse = false;
 private int priorityLevel;
 // the priority level assigned by scheduler, 0 by default
+private long clientStateId;
 
 Call() {
   this(RpcConstants.INVALID_CALL_ID, RpcConstants.INVALID_RETRY_COUNT,
@@ -736,6 +737,7 @@ public abstract class Server {
   this.clientId = clientId;
   this.traceScope = traceScope;
   this.callerContext = callerContext;
+  this.clientStateId = Long.MIN_VALUE;
 }
 
 @Override
@@ -813,6 +815,14 @@ public abstract class Server {
   this.priorityLevel = priorityLevel;
 }
 
+public long getClientStateId() {
+  return this.clientStateId;
+}
+
+public void setClientStateId(long stateId) {
+  this.clientStateId = stateId;
+}
+
 @InterfaceStability.Unstable
 public void deferResponse() {
   this.deferredResponse = true;
@@ -2495,11 +2505,6 @@ public abstract class Server {
 }
   }
 
-  if (alignmentContext != null) {
-// Check incoming RPC request's state.
-alignmentContext.receiveRequestState(header);
-  }
-
   CallerContext callerContext = null;
   if (header.hasCallerContext()) {
 callerContext =
@@ -2516,6 +2521,10 @@ public abstract class Server {
 
   // Save the priority level assignment by the scheduler
   call.setPriorityLevel(callQueue.getPriorityLevel(call));
+  if(alignmentContext != null) {
+long stateId = alignmentContext.receiveRequestState(header);
+call.setClientStateId(stateId);
+  }
 
   try {
 internalQueueCall(call);
@@ -2697,6 +2706,24 @@ public abstract class Server {
 TraceScope traceScope = null;
 try {
   final Call call = callQueue.take(); // pop the queue; maybe blocked 
here
+  if (alignmentContext != null && call.getClientStateId() >
+  alignmentContext.getLastSeenStateId()) {
+/*
+ * The call processing should be postponed until the client call's
+ * state id is aligned (>=) with the server state id.
+
+ * NOTE:
+ * Inserting the call back to the queue can change the order of 
call
+ * execution comparing to their original placement into the queue.
+ * This is not a problem, because Hadoop RPC does not have any
+ * constraints on ordering the incoming rpc requests.
+ * In case of Observer, it handles only reads, which are
+ * commutative.
+ */
+//Re-queue the call and continue
+internalQueueCall(call);
+

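The comment block above carries the key argument: re-queuing is safe because Hadoop RPC imposes no ordering across incoming requests, and an Observer serves only commutative reads. A self-contained sketch of the requeue-until-aligned loop, with simplified stand-ins for the real callQueue and alignment context:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.atomic.AtomicLong;

    public class AlignmentGateSketch {
      static class Call {
        final long clientStateId;
        Call(long clientStateId) { this.clientStateId = clientStateId; }
      }

      public static void main(String[] args) throws InterruptedException {
        LinkedBlockingQueue<Call> callQueue = new LinkedBlockingQueue<>();
        AtomicLong serverStateId = new AtomicLong(5);  // last applied state id
        callQueue.put(new Call(7)); // client is ahead of the server: postpone
        callQueue.put(new Call(3)); // client is behind: safe to process now

        while (!callQueue.isEmpty()) {
          Call call = callQueue.take();
          if (call.clientStateId > serverStateId.get()) {
            callQueue.put(call);               // re-queue the call and continue
            serverStateId.incrementAndGet();   // stand-in for edit tailing progress
            continue;
          }
          System.out.println("processing call with state id " + call.clientStateId);
        }
      }
    }
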
[hadoop] 05/50: HDFS-13578. [SBN read] Add ReadOnly annotation to methods in ClientProtocol. Contributed by Chao Sun.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit d7e0011b63e24578fb93d4ce5bbeb7fae87802fa
Author: Erik Krogen 
AuthorDate: Fri Jun 1 09:24:38 2018 -0700

HDFS-13578. [SBN read] Add ReadOnly annotation to methods in 
ClientProtocol. Contributed by Chao Sun.
---
 .../hadoop/hdfs/protocol/ClientProtocol.java   |  43 +
 .../hadoop/hdfs/server/namenode/ha/ReadOnly.java   |  47 ++
 .../apache/hadoop/hdfs/protocol/TestReadOnly.java  | 101 +
 3 files changed, 191 insertions(+)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 368f0f1..7923113 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -48,6 +48,7 @@ import 
org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
+import org.apache.hadoop.hdfs.server.namenode.ha.ReadOnly;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
@@ -128,6 +129,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
   @Idempotent
+  @ReadOnly(atimeAffected = true)
   LocatedBlocks getBlockLocations(String src, long offset, long length)
   throws IOException;
 
@@ -137,6 +139,7 @@ public interface ClientProtocol {
* @throws IOException
*/
   @Idempotent
+  @ReadOnly
   FsServerDefaults getServerDefaults() throws IOException;
 
   /**
@@ -277,6 +280,7 @@ public interface ClientProtocol {
* @return All the in-use block storage policies currently.
*/
   @Idempotent
+  @ReadOnly
   BlockStoragePolicy[] getStoragePolicies() throws IOException;
 
   /**
@@ -319,6 +323,7 @@ public interface ClientProtocol {
*   If file/dir src is not found
*/
   @Idempotent
+  @ReadOnly
   BlockStoragePolicy getStoragePolicy(String path) throws IOException;
 
   /**
@@ -685,6 +690,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
   @Idempotent
+  @ReadOnly
   DirectoryListing getListing(String src, byte[] startAfter,
   boolean needLocation) throws IOException;
 
@@ -696,6 +702,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred.
*/
   @Idempotent
+  @ReadOnly
   SnapshottableDirectoryStatus[] getSnapshottableDirListing()
   throws IOException;
 
@@ -776,6 +783,7 @@ public interface ClientProtocol {
* 
*/
   @Idempotent
+  @ReadOnly
   long[] getStats() throws IOException;
 
   /**
@@ -783,6 +791,7 @@ public interface ClientProtocol {
* in the filesystem.
*/
   @Idempotent
+  @ReadOnly
   ReplicatedBlockStats getReplicatedBlockStats() throws IOException;
 
   /**
@@ -790,6 +799,7 @@ public interface ClientProtocol {
* in the filesystem.
*/
   @Idempotent
+  @ReadOnly
   ECBlockGroupStats getECBlockGroupStats() throws IOException;
 
   /**
@@ -799,6 +809,7 @@ public interface ClientProtocol {
* otherwise all datanodes if type is ALL.
*/
   @Idempotent
+  @ReadOnly
   DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type)
   throws IOException;
 
@@ -806,6 +817,7 @@ public interface ClientProtocol {
* Get a report on the current datanode storages.
*/
   @Idempotent
+  @ReadOnly
   DatanodeStorageReport[] getDatanodeStorageReport(
   HdfsConstants.DatanodeReportType type) throws IOException;
 
@@ -818,6 +830,7 @@ public interface ClientProtocol {
*   a symlink.
*/
   @Idempotent
+  @ReadOnly
   long getPreferredBlockSize(String filename)
   throws IOException;
 
@@ -962,6 +975,7 @@ public interface ClientProtocol {
* cookie returned from the previous call.
*/
   @Idempotent
+  @ReadOnly
   CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
   throws IOException;
 
@@ -997,6 +1011,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
   @Idempotent
+  @ReadOnly
   HdfsFileStatus getFileInfo(String src) throws IOException;
 
   /**
@@ -1011,6 +1026,7 @@ public interface ClientProtocol {
* @throws IOException If an I/O error occurred
*/
   @Idempotent
+  @ReadOnly
   boolean isFileClosed(String src) throws IOException;
 
   /**
@@ -1027,6 +1043,7 @@ public interface ClientProtocol {
* @throws 

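The new ReadOnly.java itself is not shown in this excerpt; a minimal sketch of what such a method-level marker annotation can look like, assuming runtime retention so the server side can discover read-only methods reflectively. The atimeAffected element is taken from the getBlockLocations hunk above; the rest is standard java.lang.annotation boilerplate:

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;

    // Sketch only; the real annotation is
    // org.apache.hadoop.hdfs.server.namenode.ha.ReadOnly.
    @Target(ElementType.METHOD)
    @Retention(RetentionPolicy.RUNTIME)
    @interface ReadOnly {
      // Marks reads that still update access time, as on getBlockLocations.
      boolean atimeAffected() default false;
    }
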
[hadoop] 03/50: HDFS-13331. [SBN read] Add lastSeenStateId to RpcRequestHeader. Contributed by Plamen Jeliazkov.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit c2585f7e281b065ce7ce8f81b8bfb6f9c7fd8727
Author: Erik Krogen 
AuthorDate: Wed Apr 4 15:42:39 2018 -0700

HDFS-13331. [SBN read] Add lastSeenStateId to RpcRequestHeader. Contributed 
by Plamen Jeliazkov.
---
 .../org/apache/hadoop/ipc/AlignmentContext.java| 14 
 .../main/java/org/apache/hadoop/ipc/Client.java|  2 +-
 .../main/java/org/apache/hadoop/ipc/Server.java|  5 ++
 .../java/org/apache/hadoop/util/ProtoUtil.java | 13 
 .../hadoop-common/src/main/proto/RpcHeader.proto   |  1 +
 .../org/apache/hadoop/hdfs/ClientGCIContext.java   | 30 +---
 .../java/org/apache/hadoop/hdfs/DFSClient.java | 10 ++-
 .../hdfs/server/namenode/GlobalStateIdContext.java | 26 ++-
 .../hadoop/hdfs/TestStateAlignmentContext.java | 89 +-
 9 files changed, 173 insertions(+), 17 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
index f952325..66d6edc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AlignmentContext.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ipc;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
 
 /**
@@ -48,4 +49,17 @@ public interface AlignmentContext {
*/
   void receiveResponseState(RpcResponseHeaderProto header);
 
+  /**
+   * This is the intended client method call to pull last seen state info
+   * into RPC request processing.
+   * @param header The RPC request header builder.
+   */
+  void updateRequestState(RpcRequestHeaderProto.Builder header);
+
+  /**
+   * This is the intended server method call to implement to receive
+   * client state info during RPC response header processing.
+   * @param header The RPC request header.
+   */
+  void receiveRequestState(RpcRequestHeaderProto header);
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index bb09799..b9cac6f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1112,7 +1112,7 @@ public class Client implements AutoCloseable {
   // Items '1' and '2' are prepared here. 
   RpcRequestHeaderProto header = ProtoUtil.makeRpcRequestHeader(
   call.rpcKind, OperationProto.RPC_FINAL_PACKET, call.id, call.retry,
-  clientId);
+  clientId, alignmentContext);
 
   final ResponseBuffer buf = new ResponseBuffer();
   header.writeDelimitedTo(buf);
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index ada458e..f32a64b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -2481,6 +2481,11 @@ public abstract class Server {
 }
   }
 
+  if (alignmentContext != null) {
+// Check incoming RPC request's state.
+alignmentContext.receiveRequestState(header);
+  }
+
   CallerContext callerContext = null;
   if (header.hasCallerContext()) {
 callerContext =
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
index 1a5acba..9a0b05c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.util;
 import java.io.DataInput;
 import java.io.IOException;
 
+import org.apache.hadoop.ipc.AlignmentContext;
 import org.apache.hadoop.ipc.CallerContext;
 import org.apache.hadoop.ipc.RPC;
 import 
org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
@@ -165,6 +166,13 @@ public abstract class ProtoUtil {
   public static RpcRequestHeaderProto makeRpcRequestHeader(RPC.RpcKind rpcKind,
   RpcRequestHeaderProto.OperationProto operation, int callId,
   int retryCount, byte[] uuid) {
+return 

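On the client side, the state id carried in these headers only ever moves forward, which is exactly what a max-accumulator provides. A self-contained sketch of that bookkeeping; the same LongAccumulator construct appears verbatim in ClientGSIContext in a later commit of this series:

    import java.util.concurrent.atomic.LongAccumulator;

    public class LastSeenStateSketch {
      public static void main(String[] args) {
        // Math::max as the accumulator means the value never goes backwards.
        LongAccumulator lastSeenStateId =
            new LongAccumulator(Math::max, Long.MIN_VALUE);
        lastSeenStateId.accumulate(42L); // state id from an RPC response header
        lastSeenStateId.accumulate(17L); // a late, older response is ignored
        // 42 is what gets stamped into the next request header.
        System.out.println(lastSeenStateId.get());
      }
    }
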
[hadoop] 06/50: HDFS-13399. [SBN read] Make Client field AlignmentContext non-static. Contributed by Plamen Jeliazkov.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit d502e48a7cbdb12614d24d79d38f9af60dc85db6
Author: Plamen Jeliazkov 
AuthorDate: Mon Jun 4 14:58:47 2018 -0700

HDFS-13399. [SBN read] Make Client field AlignmentContext non-static. 
Contributed by Plamen Jeliazkov.
---
 .../main/java/org/apache/hadoop/ipc/Client.java|  49 ++-
 .../org/apache/hadoop/ipc/ProtobufRpcEngine.java   |  15 +-
 .../src/main/java/org/apache/hadoop/ipc/RPC.java   |  39 +-
 .../main/java/org/apache/hadoop/ipc/RpcEngine.java |   3 +-
 .../main/java/org/apache/hadoop/ipc/Server.java|  29 +-
 .../org/apache/hadoop/ipc/WritableRpcEngine.java   |  14 +-
 .../test/java/org/apache/hadoop/ipc/TestRPC.java   |   5 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java |   9 -
 .../apache/hadoop/hdfs/NameNodeProxiesClient.java  |  12 +-
 .../ha/AbstractNNFailoverProxyProvider.java|   7 +-
 .../server/namenode/ha/ClientHAProxyFactory.java   |  12 +
 .../hadoop/hdfs/TestStateAlignmentContext.java | 212 --
 .../hdfs/TestStateAlignmentContextWithHA.java  | 467 +
 13 files changed, 620 insertions(+), 253 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index b9cac6f..51ab669 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -103,12 +103,6 @@ public class Client implements AutoCloseable {
   return false;
 }
   };
-  private static AlignmentContext alignmentContext;
-
-  /** Set alignment context to use to fetch state alignment info from RPC. */
-  public static void setAlignmentContext(AlignmentContext ac) {
-alignmentContext = ac;
-  }
 
   @SuppressWarnings("unchecked")
   @Unstable
@@ -344,6 +338,7 @@ public class Client implements AutoCloseable {
 final RPC.RpcKind rpcKind;  // Rpc EngineKind
 boolean done;   // true when call is done
 private final Object externalHandler;
+private AlignmentContext alignmentContext;
 
 private Call(RPC.RpcKind rpcKind, Writable param) {
   this.rpcKind = rpcKind;
@@ -385,6 +380,15 @@ public class Client implements AutoCloseable {
   }
 }
 
+/**
+ * Set an AlignmentContext for the call to update when call is done.
+ *
+ * @param ac alignment context to update.
+ */
+public synchronized void setAlignmentContext(AlignmentContext ac) {
+  this.alignmentContext = ac;
+}
+
 /** Set the exception when there is an error.
  * Notify the caller the call is done.
  * 
@@ -1112,7 +1116,7 @@ public class Client implements AutoCloseable {
   // Items '1' and '2' are prepared here. 
   RpcRequestHeaderProto header = ProtoUtil.makeRpcRequestHeader(
   call.rpcKind, OperationProto.RPC_FINAL_PACKET, call.id, call.retry,
-  clientId, alignmentContext);
+  clientId, call.alignmentContext);
 
   final ResponseBuffer buf = new ResponseBuffer();
   header.writeDelimitedTo(buf);
@@ -1189,9 +1193,9 @@ public class Client implements AutoCloseable {
   Writable value = packet.newInstance(valueClass, conf);
   final Call call = calls.remove(callId);
   call.setRpcResponse(value);
-}
-if (alignmentContext != null) {
-  alignmentContext.receiveResponseState(header);
+  if (call.alignmentContext != null) {
+call.alignmentContext.receiveResponseState(header);
+  }
 }
 // verify that packet length was correct
 if (packet.remaining() > 0) {
@@ -1368,7 +1372,15 @@ public class Client implements AutoCloseable {
   ConnectionId remoteId, AtomicBoolean fallbackToSimpleAuth)
   throws IOException {
 return call(rpcKind, rpcRequest, remoteId, RPC.RPC_SERVICE_CLASS_DEFAULT,
-  fallbackToSimpleAuth);
+  fallbackToSimpleAuth, null);
+  }
+
+  public Writable call(RPC.RpcKind rpcKind, Writable rpcRequest,
+  ConnectionId remoteId, AtomicBoolean fallbackToSimpleAuth,
+  AlignmentContext alignmentContext)
+  throws IOException {
+return call(rpcKind, rpcRequest, remoteId, RPC.RPC_SERVICE_CLASS_DEFAULT,
+fallbackToSimpleAuth, alignmentContext);
   }
 
   private void checkAsyncCall() throws IOException {
@@ -1385,6 +1397,14 @@ public class Client implements AutoCloseable {
 }
   }
 
+  Writable call(RPC.RpcKind rpcKind, Writable rpcRequest,
+ConnectionId remoteId, int serviceClass,
+AtomicBoolean fallbackToSimpleAuth)
+  throws IOException {
+return call(rpcKind, rpcRequest, remoteId, serviceClass,
+fallbackToSimpleAuth, null);
+  }
+
   /**

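The net effect is visible across the hunks above: the process-wide static Client.alignmentContext is removed, each Call now carries its own context set through setAlignmentContext(), and the response path updates call.alignmentContext rather than shared state. Two proxies in the same JVM can therefore track state ids independently instead of clobbering each other through a static field.
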
[hadoop] 04/50: HDFS-13286. [SBN read] Add haadmin commands to transition between standby and observer. Contributed by Chao Sun.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 849e1f018e4e315a5475bed20b9a14a0437c84cf
Author: Erik Krogen 
AuthorDate: Fri May 4 12:22:12 2018 -0700

HDFS-13286. [SBN read] Add haadmin commands to transition between standby 
and observer. Contributed by Chao Sun.
---
 .../org/apache/hadoop/ha/FailoverController.java   |  2 +-
 .../main/java/org/apache/hadoop/ha/HAAdmin.java| 42 +
 .../org/apache/hadoop/ha/HAServiceProtocol.java| 18 +
 .../apache/hadoop/ha/HAServiceProtocolHelper.java  |  9 +
 .../java/org/apache/hadoop/ha/HAServiceTarget.java |  7 
 .../HAServiceProtocolClientSideTranslatorPB.java   | 16 
 .../HAServiceProtocolServerSideTranslatorPB.java   | 20 ++
 .../src/main/proto/HAServiceProtocol.proto | 20 ++
 .../java/org/apache/hadoop/ha/DummyHAService.java  | 18 -
 .../java/org/apache/hadoop/ha/MiniZKFCCluster.java |  4 ++
 .../resolver/FederationNamenodeServiceState.java   |  3 ++
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java|  4 ++
 .../hdfs/server/datanode/BPServiceActor.java   |  2 +-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  3 +-
 .../hadoop/hdfs/server/namenode/NameNode.java  | 27 -
 .../hdfs/server/namenode/NameNodeRpcServer.java|  8 
 .../hdfs/server/namenode/ha/StandbyState.java  | 12 +++---
 .../hadoop/hdfs/tools/NNHAServiceTarget.java   |  5 +++
 .../hadoop-hdfs/src/main/proto/HdfsServer.proto|  1 +
 .../apache/hadoop/hdfs/tools/TestDFSHAAdmin.java   |  6 +++
 .../hdfs/tools/TestDFSHAAdminMiniCluster.java  | 44 ++
 .../yarn/server/resourcemanager/AdminService.java  |  7 
 22 files changed, 258 insertions(+), 20 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
index b86ae29..4fc52d5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
@@ -129,7 +129,7 @@ public class FailoverController {
 
 if (!toSvcStatus.getState().equals(HAServiceState.STANDBY)) {
   throw new FailoverFailedException(
-  "Can't failover to an active service");
+  "Can't failover to an " + toSvcStatus.getState() + " service");
 }
 
 if (!toSvcStatus.isReadyToBecomeActive()) {
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
index 9b7d7ba..61700f9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
@@ -72,6 +72,9 @@ public abstract class HAAdmin extends Configured implements 
Tool {
 new UsageInfo("[--"+FORCEACTIVE+"] ", "Transitions the 
service into Active state"))
 .put("-transitionToStandby",
 new UsageInfo("", "Transitions the service into Standby 
state"))
+  .put("-transitionToObserver",
+  new UsageInfo("<serviceId>",
+  "Transitions the service into Observer state"))
 .put("-failover",
 new UsageInfo("[--"+FORCEFENCE+"] [--"+FORCEACTIVE+"]  
",
 "Failover from the first service to the second.\n" +
@@ -221,6 +224,28 @@ public abstract class HAAdmin extends Configured 
implements Tool {
 HAServiceProtocolHelper.transitionToStandby(proto, createReqInfo());
 return 0;
   }
+
+  private int transitionToObserver(final CommandLine cmd)
+  throws IOException, ServiceFailedException {
+String[] argv = cmd.getArgs();
+if (argv.length != 1) {
+  errOut.println("transitionToObserver: incorrect number of arguments");
+  printUsage(errOut, "-transitionToObserver");
+  return -1;
+}
+
+HAServiceTarget target = resolveTarget(argv[0]);
+if (!checkSupportObserver(target)) {
+  return -1;
+}
+if (!checkManualStateManagementOK(target)) {
+  return -1;
+}
+HAServiceProtocol proto = target.getProxy(getConf(), 0);
+HAServiceProtocolHelper.transitionToObserver(proto, createReqInfo());
+return 0;
+  }
+
   /**
* Ensure that we are allowed to manually manage the HA state of the target
* service. If automatic failover is configured, then the automatic
@@ -249,6 +274,21 @@ public abstract class HAAdmin extends Configured 
implements Tool {
 return true;
   }
 
+  /**
+   * Check if the target supports the Observer state.
+   * @param target the target to check
+   * @return true if the target support Observer state, false otherwise.
+   */
+  private boolean 

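In use, the new subcommand follows the existing transitionToActive and transitionToStandby pattern, e.g. hdfs haadmin -transitionToObserver <serviceId>. Both guards are visible above: checkSupportObserver(target) and checkManualStateManagementOK(target) must pass before the HAServiceProtocol proxy is asked to make the transition.
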
[hadoop] 13/50: HDFS-13610. [SBN read] Edit Tail Fast Path Part 4: Cleanup. Integration test, documentation, remove unnecessary dummy sync, minor fixups. Contributed by Erik Krogen.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 9976b7bcbe9085e77e9a80419508a7672efdb8d7
Author: Erik Krogen 
AuthorDate: Fri May 11 13:23:38 2018 -0700

HDFS-13610. [SBN read] Edit Tail Fast Path Part 4: Cleanup. Integration 
test, documentation, remove unnecessary dummy sync, minor fixups. Contributed 
by Erik Krogen.
---
 .../hdfs/qjournal/client/QuorumJournalManager.java |  15 +-
 .../hdfs/qjournal/client/QuorumOutputStream.java   |  13 +-
 .../hadoop/hdfs/qjournal/server/Journal.java   |   6 +
 .../hdfs/qjournal/server/JournaledEditsCache.java  |  63 +---
 .../site/markdown/HDFSHighAvailabilityWithQJM.md   |  35 +
 .../qjournal/client/TestQuorumJournalManager.java  |   4 +
 .../client/TestQuorumJournalManagerUnit.java   |   2 +
 .../hdfs/qjournal/server/JournalTestUtil.java  |  48 ++
 .../namenode/ha/TestStandbyInProgressTail.java | 167 +
 9 files changed, 284 insertions(+), 69 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index f9d96b0..a14e6a0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@ -77,6 +77,8 @@ public class QuorumJournalManager implements JournalManager {
   // Maximum number of transactions to fetch at a time when using the
   // RPC edit fetch mechanism
   private final int maxTxnsPerRpc;
+  // Whether or not in-progress tailing is enabled in the configuration
+  private final boolean inProgressTailingEnabled;
   // Timeouts for which the QJM will wait for each of the following actions.
   private final int startSegmentTimeoutMs;
   private final int prepareRecoveryTimeoutMs;
@@ -139,6 +141,9 @@ public class QuorumJournalManager implements JournalManager 
{
 conf.getInt(QJM_RPC_MAX_TXNS_KEY, QJM_RPC_MAX_TXNS_DEFAULT);
 Preconditions.checkArgument(maxTxnsPerRpc > 0,
 "Must specify %s greater than 0!", QJM_RPC_MAX_TXNS_KEY);
+this.inProgressTailingEnabled = conf.getBoolean(
+DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY,
+DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_DEFAULT);
 // Configure timeouts.
 this.startSegmentTimeoutMs = conf.getInt(
 DFSConfigKeys.DFS_QJOURNAL_START_SEGMENT_TIMEOUT_KEY,
@@ -420,11 +425,8 @@ public class QuorumJournalManager implements 
JournalManager {
 layoutVersion);
 loggers.waitForWriteQuorum(q, startSegmentTimeoutMs,
 "startLogSegment(" + txId + ")");
-boolean updateCommittedTxId = conf.getBoolean(
-DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY,
-DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_DEFAULT);
 return new QuorumOutputStream(loggers, txId, outputBufferCapacity,
-writeTxnsTimeoutMs, updateCommittedTxId);
+writeTxnsTimeoutMs);
   }
 
   @Override
@@ -493,7 +495,10 @@ public class QuorumJournalManager implements 
JournalManager {
  public void selectInputStreams(Collection<EditLogInputStream> streams,
   long fromTxnId, boolean inProgressOk,
   boolean onlyDurableTxns) throws IOException {
-if (inProgressOk) {
+// Some calls will use inProgressOK to get in-progress edits even if
+// the cache used for RPC calls is not enabled; fall back to using the
+// streaming mechanism to serve such requests
+if (inProgressOk && inProgressTailingEnabled) {
   LOG.info("Tailing edits starting from txn ID " + fromTxnId +
   " via RPC mechanism");
   try {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java
index 3ffcd3e..e094b21 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumOutputStream.java
@@ -33,17 +33,15 @@ class QuorumOutputStream extends EditLogOutputStream {
   private EditsDoubleBuffer buf;
   private final long segmentTxId;
   private final int writeTimeoutMs;
-  private final boolean updateCommittedTxId;
 
   public QuorumOutputStream(AsyncLoggerSet loggers,
   long txId, int outputBufferCapacity,
-  int writeTimeoutMs, boolean updateCommittedTxId) throws IOException {
+  int writeTimeoutMs) throws IOException {
 super();
 this.buf = new EditsDoubleBuffer(outputBufferCapacity);
 this.loggers = loggers;
 this.segmentTxId = txId;
 this.writeTimeoutMs = 

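Two simplifications are visible in this excerpt: the dfs.ha.tail-edits.in-progress setting is now read once in the QuorumJournalManager constructor (the inProgressTailingEnabled field) rather than on every startLogSegment call, and QuorumOutputStream drops its updateCommittedTxId constructor flag, in line with the commit message's "remove unnecessary dummy sync".
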
[hadoop] branch branch-3.0 updated (6aa76ea -> b530301)

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a change to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 6aa76ea  YARN-9668. UGI conf doesn't read user overridden 
configurations on RM and NM startup. (Contributed by Jonathan Hung)
 new 44d8b43  HDFS-12975. [SBN read] Changes to the NameNode to support 
reads from standby. Contributed by Chao Sun.
 new ccad9ce  HDFS-12977. [SBN read] Add stateId to RPC headers. 
Contributed by Plamen Jeliazkov.
 new c2585f7  HDFS-13331. [SBN read] Add lastSeenStateId to 
RpcRequestHeader. Contributed by Plamen Jeliazkov.
 new 849e1f0  HDFS-13286. [SBN read] Add haadmin commands to transition 
between standby and observer. Contributed by Chao Sun.
 new d7e0011  HDFS-13578. [SBN read] Add ReadOnly annotation to methods in 
ClientProtocol. Contributed by Chao Sun.
 new d502e48  HDFS-13399. [SBN read] Make Client field AlignmentContext 
non-static. Contributed by Plamen Jeliazkov.
 new eacf432  HDFS-13607. [SBN read] Edit Tail Fast Path Part 1: Enhance 
JournalNode with an in-memory cache of recent edit transactions. Contributed by 
Erik Krogen.
 new db9cec6  HDFS-13608. [SBN read] Edit Tail Fast Path Part 2: Add 
ability for JournalNode to serve edits via RPC. Contributed by Erik Krogen.
 new f847983  HDFS-13609. [SBN read] Edit Tail Fast Path Part 3: 
NameNode-side changes to support tailing edits via RPC. Contributed by Erik 
Krogen.
 new c02f4cc  HDFS-13706. [SBN read] Rename client context to 
ClientGSIContext. Contributed by Konstantin Shvachko.
 new 13e8692  HDFS-12976. [SBN read] Introduce ObserverReadProxyProvider. 
Contributed by Chao Sun.
 new ac0cdb0  HDFS-13665. [SBN read] Move RPC response serialization into 
Server.doResponse(). Contributed by Plamen Jeliazkov.
 new 9976b7b  HDFS-13610. [SBN read] Edit Tail Fast Path Part 4: Cleanup. 
Integration test, documentation, remove unnecessary dummy sync, minor fixups. 
Contributed by Erik Krogen.
 new 0f811b0  HDFS-13688. [SBN read] Introduce msync API call. Contributed 
by Chen Liang.
 new b41997c  HDFS-13789. Reduce logging frequency of 
QuorumJournalManager#selectInputStreams. Contributed by Erik Krogen.
 new 60f0e4df HDFS-13767. Add msync server implementation. Contributed by 
Chen Liang.
 new ad80383  HDFS-13851. Remove AlignmentContext from 
AbstractNNFailoverProxyProvider. Contributed by Konstantin Shvachko.
 new be8beda  HDFS-13782. ObserverReadProxyProvider should work with 
IPFailoverProxyProvider. Contributed by Konstantin Shvachko.
 new 34b05a2  HDFS-13779. [SBN read] Implement proper failover and observer 
failure handling logic for ObserverReadProxyProvider. Contributed by Erik 
Krogen.
 new 613c9e4  HDFS-13880. Add mechanism to allow certain RPC calls to 
bypass sync. Contributed by Chen Liang.
 new ea2d862  HDFS-13778. [SBN read] TestStateAlignmentContextWithHA should 
use real ObserverReadProxyProvider instead of AlignmentContextProxyProvider. 
Contributed by Konstantin Shvachko and Plamen Jeliazkov.
 new 3273736  HDFS-13749. [SBN read] Use getServiceStatus to discover 
observer namenodes. Contributed by Chao Sun.
 new 894f095  HDFS-13898. [SBN read] Throw retriable exception for 
getBlockLocations when ObserverNameNode is in safemode. Contributed by Chao Sun.
 new 56af83c  HDFS-13791. Limit logging frequency of edit tail related 
statements. Contributed by Erik Krogen.
 new 28b11b0  HDFS-13961. [SBN read] TestObserverNode refactoring. 
Contributed by Konstantin Shvachko.
 new 7b425c8  HDFS-13523. Support observer nodes in MiniDFSCluster. 
Contributed by Konstantin Shvachko.
 new ca565ef  HDFS-13925. Unit Test for transitioning between different 
states. Contributed by Sherwood Zheng.
 new 1189761  HDFS-13924. [SBN read] Handle BlockMissingException when 
reading from observer. Contributed by Chao Sun.
 new 4ce7f9f  HDFS-14016. [SBN read] ObserverReadProxyProvider should 
enable observer read by default. Contributed by Chen Liang.
 new 683daed  HDFS-14035. NN status discovery does not leverage delegation 
token. Contributed by Chen Liang.
 new 96cdd13  HDFS-14017. [SBN read] 
ObserverReadProxyProviderWithIPFailover should work with HA configuration. 
Contributed by Chen Liang.
 new 6916ee7  HDFS-14067. [SBN read] Allow manual failover between standby 
and observer. Contributed by Chao Sun.
 new 54a1c66  HDFS-14094. [SBN read] Fix the order of logging arguments in 
ObserverReadProxyProvider. Contributed by Ayush Saxena.
 new 8769e6f  HDFS-14120. [SBN read] ORFPP should also clone DT for the 
virtual IP. Contributed by Chen Liang.
 new b8df864  HDFS-14131. [SBN read] Create user guide for Consistent Reads 
from Observer feature. Contributed by Chao Sun.
 new 82f68a4  HDFS-14142. Move ipfailover config key out of 
HdfsClientConfigKeys. Contributed by Chen Liang.
 new 

[hadoop] 10/50: HDFS-13706. [SBN read] Rename client context to ClientGSIContext. Contributed by Konstantin Shvachko.

2019-07-25 Thread cliang
This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit c02f4ccba19c9399e6421f6df2411bc57a5ce86b
Author: Konstantin V Shvachko 
AuthorDate: Fri Jun 29 15:50:37 2018 -0700

HDFS-13706. [SBN read] Rename client context to ClientGSIContext. 
Contributed by Konstantin Shvachko.
---
 ...ClientGCIContext.java => ClientGSIContext.java} |  4 +++-
 .../hdfs/TestStateAlignmentContextWithHA.java  | 28 +++---
 2 files changed, 17 insertions(+), 15 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientGCIContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientGSIContext.java
similarity index 96%
rename from 
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientGCIContext.java
rename to 
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientGSIContext.java
index 0d0bd25..6f69eed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientGCIContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientGSIContext.java
@@ -27,12 +27,14 @@ import 
org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
 import java.util.concurrent.atomic.LongAccumulator;
 
 /**
+ * Global State Id context for the client.
+ * 
  * This is the client side implementation responsible for receiving
  * state alignment info from server(s).
  */
 @InterfaceAudience.Private
 @InterfaceStability.Stable
-class ClientGCIContext implements AlignmentContext {
+class ClientGSIContext implements AlignmentContext {
 
   private final LongAccumulator lastSeenStateId =
   new LongAccumulator(Math::max, Long.MIN_VALUE);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
index 3437bb0..00ed6bc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
@@ -67,11 +67,11 @@ public class TestStateAlignmentContextWithHA {
   private static final int NUMFILES = 300;
   private static final Configuration CONF = new HdfsConfiguration();
   private static final String NAMESERVICE = "nameservice";
-  private static final List<ClientGCIContext> AC_LIST = new ArrayList<>();
+  private static final List<ClientGSIContext> AC_LIST = new ArrayList<>();
 
   private static MiniDFSCluster cluster;
   private static List clients;
-  private static ClientGCIContext spy;
+  private static ClientGSIContext spy;
 
   private DistributedFileSystem dfs;
   private int active = 0;
@@ -80,7 +80,7 @@ public class TestStateAlignmentContextWithHA {
   static class AlignmentContextProxyProvider
   extends ConfiguredFailoverProxyProvider {
 
-private ClientGCIContext alignmentContext;
+private ClientGSIContext alignmentContext;
 
 public AlignmentContextProxyProvider(
 Configuration conf, URI uri, Class xface,
@@ -89,14 +89,14 @@ public class TestStateAlignmentContextWithHA {
 
   // Create and set AlignmentContext in HAProxyFactory.
   // All proxies by factory will now have AlignmentContext assigned.
-  this.alignmentContext = (spy != null ? spy : new ClientGCIContext());
+  this.alignmentContext = (spy != null ? spy : new ClientGSIContext());
   ((ClientHAProxyFactory) factory).setAlignmentContext(alignmentContext);
 
   AC_LIST.add(alignmentContext);
 }
 
 @Override // AbstractNNFailoverProxyProvider
-public synchronized ClientGCIContext getAlignmentContext() {
+public synchronized ClientGSIContext getAlignmentContext() {
   return this.alignmentContext;
 }
   }
@@ -104,7 +104,7 @@ public class TestStateAlignmentContextWithHA {
   static class SpyConfiguredContextProxyProvider
   extends ConfiguredFailoverProxyProvider {
 
-private ClientGCIContext alignmentContext;
+private ClientGSIContext alignmentContext;
 
 public SpyConfiguredContextProxyProvider(
 Configuration conf, URI uri, Class xface,
@@ -112,7 +112,7 @@ public class TestStateAlignmentContextWithHA {
   super(conf, uri, xface, factory);
 
   // Create but DON'T set in HAProxyFactory.
-  this.alignmentContext = (spy != null ? spy : new ClientGCIContext());
+  this.alignmentContext = (spy != null ? spy : new ClientGSIContext());
 
   AC_LIST.add(alignmentContext);
 }
@@ -180,7 +180,7 @@ public class TestStateAlignmentContextWithHA {
 
 try (DistributedFileSystem clearDfs =
  (DistributedFileSystem) FileSystem.get(confCopy)) {
-  ClientGCIContext clientState = getContext(1);
+  

[hadoop] branch ozone-0.4.1 updated: HDDS-1858. mTLS support for Ozone is not correct. Contributed by Siddharth Wagle.

2019-07-25 Thread xyao
This is an automated email from the ASF dual-hosted git repository.

xyao pushed a commit to branch ozone-0.4.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4.1 by this push:
 new 0b92dec  HDDS-1858. mTLS support for Ozone is not correct. Contributed 
by Siddharth Wagle.
0b92dec is described below

commit 0b92dec7f0ad99d937b5ff56a4a0a3bd629c05be
Author: Xiaoyu Yao 
AuthorDate: Thu Jul 25 09:52:02 2019 -0700

HDDS-1858. mTLS support for Ozone is not correct. Contributed by Siddharth 
Wagle.

(cherry picked from commit b41ef61ebcc8a7ab5449390756b6361ea918a79c)
---
 hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java 
b/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
index a63d18a..557815b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
@@ -202,11 +202,11 @@ public interface RatisHelper {
   static GrpcTlsConfig createTlsClientConfig(SecurityConfig conf) {
 if (conf.isGrpcTlsEnabled()) {
   if (conf.isGrpcMutualTlsRequired()) {
-return new GrpcTlsConfig(
-null, null, conf.getTrustStoreFile(), false);
-  } else {
 return new GrpcTlsConfig(conf.getClientPrivateKeyFile(),
 conf.getClientCertChainFile(), conf.getTrustStoreFile(), true);
+  } else {
+return new GrpcTlsConfig(
+null, null, conf.getTrustStoreFile(), false);
   }
 }
 return null;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1858. mTLS support for Ozone is not correct. Contributed by Siddharth Wagle.

2019-07-25 Thread xyao
This is an automated email from the ASF dual-hosted git repository.

xyao pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b41ef61  HDDS-1858. mTLS support for Ozone is not correct. Contributed 
by Siddharth Wagle.
b41ef61 is described below

commit b41ef61ebcc8a7ab5449390756b6361ea918a79c
Author: Xiaoyu Yao 
AuthorDate: Thu Jul 25 09:52:02 2019 -0700

HDDS-1858. mTLS support for Ozone is not correct. Contributed by Siddharth 
Wagle.
---
 hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java 
b/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
index a63d18a..557815b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
@@ -202,11 +202,11 @@ public interface RatisHelper {
   static GrpcTlsConfig createTlsClientConfig(SecurityConfig conf) {
 if (conf.isGrpcTlsEnabled()) {
   if (conf.isGrpcMutualTlsRequired()) {
-return new GrpcTlsConfig(
-null, null, conf.getTrustStoreFile(), false);
-  } else {
 return new GrpcTlsConfig(conf.getClientPrivateKeyFile(),
 conf.getClientCertChainFile(), conf.getTrustStoreFile(), true);
+  } else {
+return new GrpcTlsConfig(
+null, null, conf.getTrustStoreFile(), false);
   }
 }
 return null;

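The fix itself is a straight swap of the two branches: when mutual TLS is required, the gRPC client must present its own private key and certificate chain (the GrpcTlsConfig built from the client key, cert chain, trust store, and true, presumably the mTLS flag), whereas one-way TLS only needs the trust store to verify the server. The ozone-0.4.1 email above carries the identical change as a cherry-pick.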

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1842. Implement S3 Abort MPU request to use Cache and DoubleBuffer. (#1155)

2019-07-25 Thread bharat
This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3c4159f  HDDS-1842. Implement S3 Abort MPU request to use Cache and 
DoubleBuffer. (#1155)
3c4159f is described below

commit 3c4159ff3def152b81398c968285c08e5bf9d6f1
Author: Bharat Viswanadham 
AuthorDate: Thu Jul 25 08:51:11 2019 -0700

HDDS-1842. Implement S3 Abort MPU request to use Cache and DoubleBuffer. 
(#1155)
---
 .../org/apache/hadoop/ozone/audit/OMAction.java|   1 +
 .../ozone/om/ratis/OzoneManagerDoubleBuffer.java   |   3 +
 .../om/ratis/utils/OzoneManagerRatisUtils.java |   3 +
 .../multipart/S3MultipartUploadAbortRequest.java   | 173 +
 .../S3InitiateMultipartUploadResponse.java |  11 ++
 .../multipart/S3MultipartUploadAbortResponse.java  |  83 ++
 .../OzoneManagerHARequestHandlerImpl.java  |   1 +
 .../ozone/om/request/TestOMRequestUtils.java   |  18 +++
 .../s3/multipart/TestS3MultipartRequest.java   |  40 -
 .../TestS3MultipartUploadAbortRequest.java | 158 +++
 .../multipart/TestS3InitiateMultipartResponse.java |  55 ---
 .../TestS3InitiateMultipartUploadResponse.java |  45 +-
 .../s3/multipart/TestS3MultipartResponse.java  | 143 +
 .../TestS3MultipartUploadAbortResponse.java| 129 +++
 14 files changed, 764 insertions(+), 99 deletions(-)

diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
index 459663b..e72beff 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
@@ -56,6 +56,7 @@ public enum OMAction implements AuditAction {
   COMMIT_MULTIPART_UPLOAD_PARTKEY,
   COMPLETE_MULTIPART_UPLOAD,
   LIST_MULTIPART_UPLOAD_PARTS,
+  ABORT_MULTIPART_UPLOAD,
 
   //FS Actions
   GET_FILE_STATUS,
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
index e54f82d..2bde3ad 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
@@ -176,6 +176,9 @@ public class OzoneManagerDoubleBuffer {
 omMetadataManager.getKeyTable().cleanupCache(lastRatisTransactionIndex);
 
omMetadataManager.getDeletedTable().cleanupCache(lastRatisTransactionIndex);
 omMetadataManager.getS3Table().cleanupCache(lastRatisTransactionIndex);
+omMetadataManager.getMultipartInfoTable().cleanupCache(
+lastRatisTransactionIndex);
+
   }
 
   /**
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 84a6530..4d99b66 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -34,6 +34,7 @@ import 
org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequest;
 import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketCreateRequest;
 import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketDeleteRequest;
 import 
org.apache.hadoop.ozone.om.request.s3.multipart.S3InitiateMultipartUploadRequest;
+import 
org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequest;
 import 
org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequest;
 import org.apache.hadoop.ozone.om.request.volume.OMVolumeCreateRequest;
 import org.apache.hadoop.ozone.om.request.volume.OMVolumeDeleteRequest;
@@ -111,6 +112,8 @@ public final class OzoneManagerRatisUtils {
   return new S3InitiateMultipartUploadRequest(omRequest);
 case CommitMultiPartUpload:
   return new S3MultipartUploadCommitPartRequest(omRequest);
+case AbortMultiPartUpload:
+  return new S3MultipartUploadAbortRequest(omRequest);
 default:
   // TODO: will update once all request types are implemented.
   return null;
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java
new file mode 100644
index 000..cb98132
--- /dev/null
+++ 

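One detail worth noting in the OzoneManagerDoubleBuffer hunk: after a flush, each table cache is pruned up to the last applied Ratis transaction index, and this commit adds the multipart-info table to that cleanup pass. A self-contained sketch of index-based pruning with hypothetical names:

    import java.util.concurrent.ConcurrentSkipListMap;

    public class TxnIndexPruneSketch {
      public static void main(String[] args) {
        // Hypothetical cache keyed by the transaction index that wrote each entry.
        ConcurrentSkipListMap<Long, String> cacheByTxnIndex =
            new ConcurrentSkipListMap<>();
        cacheByTxnIndex.put(3L, "mpu-key-a");
        cacheByTxnIndex.put(7L, "mpu-key-b");

        long lastAppliedIndex = 5L;
        // Entries at or below the flushed index are safe to drop.
        cacheByTxnIndex.headMap(lastAppliedIndex, true).clear();
        System.out.println(cacheByTxnIndex); // {7=mpu-key-b}
      }
    }
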
[hadoop] branch trunk updated: HDDS-1749 : Ozone Client should randomize the list of nodes in pipeline for reads. (#1124)

2019-07-25 Thread shashikant
This is an automated email from the ASF dual-hosted git repository.

shashikant pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ba43233  HDDS-1749 : Ozone Client should randomize the list of nodes 
in pipeline for reads. (#1124)
ba43233 is described below

commit ba43233451128118e999f51f7c52f2d50993b56e
Author: avijayanhwx <14299376+avijayan...@users.noreply.github.com>
AuthorDate: Thu Jul 25 07:54:46 2019 -0700

HDDS-1749 : Ozone Client should randomize the list of nodes in pipeline for 
reads. (#1124)
---
 .../src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java   | 4 
 1 file changed, 4 insertions(+)

diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index eee813f..9f99ab5 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -54,6 +54,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -269,6 +270,9 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   datanodeList = pipeline.getNodesInOrder();
 } else {
   datanodeList = pipeline.getNodes();
+  // Shuffle datanode list so that clients do not read in the same order
+  // every time.
+  Collections.shuffle(datanodeList);
 }
 for (DatanodeDetails dn : datanodeList) {
   try {

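Shuffling spreads read traffic across all replicas in the pipeline instead of sending every read to the first listed datanode; the ordered list is kept only when the pipeline exposes a preferred order via getNodesInOrder(). One JDK caveat worth a sketch: Collections.shuffle reorders in place through List.set, so the list must be modifiable:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    public class ShuffleReadOrderSketch {
      public static void main(String[] args) {
        // An immutable list (e.g. List.of) would throw
        // UnsupportedOperationException; copy into an ArrayList first.
        List<String> datanodes = new ArrayList<>(List.of("dn1", "dn2", "dn3"));
        Collections.shuffle(datanodes);
        System.out.println("read order: " + datanodes); // varies run to run
      }
    }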

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org